diff --git a/.lock b/.lock new file mode 100755 index 000000000..e69de29bb diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 000000000..e69de29bb diff --git a/COPYRIGHT.txt b/COPYRIGHT.txt new file mode 100644 index 000000000..af77776cc --- /dev/null +++ b/COPYRIGHT.txt @@ -0,0 +1,45 @@ +These documentation pages include resources by third parties. This copyright +file applies only to those resources. The following third party resources are +included, and carry their own copyright notices and license terms: + +* Fira Sans (FiraSans-Regular.woff, FiraSans-Medium.woff): + + Copyright (c) 2014, Mozilla Foundation https://mozilla.org/ + with Reserved Font Name Fira Sans. + + Copyright (c) 2014, Telefonica S.A. + + Licensed under the SIL Open Font License, Version 1.1. + See FiraSans-LICENSE.txt. + +* rustdoc.css, main.js, and playpen.js: + + Copyright 2015 The Rust Developers. + Licensed under the Apache License, Version 2.0 (see LICENSE-APACHE.txt) or + the MIT license (LICENSE-MIT.txt) at your option. + +* normalize.css: + + Copyright (c) Nicolas Gallagher and Jonathan Neal. + Licensed under the MIT license (see LICENSE-MIT.txt). + +* Source Code Pro (SourceCodePro-Regular.woff, SourceCodePro-Semibold.woff): + + Copyright 2010, 2012 Adobe Systems Incorporated (http://www.adobe.com/), + with Reserved Font Name 'Source'. All Rights Reserved. Source is a trademark + of Adobe Systems Incorporated in the United States and/or other countries. + + Licensed under the SIL Open Font License, Version 1.1. + See SourceCodePro-LICENSE.txt. + +* Source Serif Pro (SourceSerifPro-Regular.ttf.woff, + SourceSerifPro-Bold.ttf.woff, SourceSerifPro-It.ttf.woff): + + Copyright 2014 Adobe Systems Incorporated (http://www.adobe.com/), with + Reserved Font Name 'Source'. All Rights Reserved. Source is a trademark of + Adobe Systems Incorporated in the United States and/or other countries. + + Licensed under the SIL Open Font License, Version 1.1. + See SourceSerifPro-LICENSE.txt. + +This copyright file is intended to be distributed with rustdoc output. diff --git a/FiraSans-LICENSE.txt b/FiraSans-LICENSE.txt new file mode 100644 index 000000000..d444ea92b --- /dev/null +++ b/FiraSans-LICENSE.txt @@ -0,0 +1,94 @@ +Digitized data copyright (c) 2012-2015, The Mozilla Foundation and Telefonica S.A. +with Reserved Font Name < Fira >, + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +http://scripts.sil.org/OFL + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. 
The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/FiraSans-Medium.woff b/FiraSans-Medium.woff new file mode 100644 index 000000000..7d742c5fb Binary files /dev/null and b/FiraSans-Medium.woff differ diff --git a/FiraSans-Regular.woff b/FiraSans-Regular.woff new file mode 100644 index 000000000..d8e0363f4 Binary files /dev/null and b/FiraSans-Regular.woff differ diff --git a/LICENSE-APACHE.txt b/LICENSE-APACHE.txt new file mode 100644 index 000000000..16fe87b06 --- /dev/null +++ b/LICENSE-APACHE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/LICENSE-MIT.txt b/LICENSE-MIT.txt new file mode 100644 index 000000000..31aa79387 --- /dev/null +++ b/LICENSE-MIT.txt @@ -0,0 +1,23 @@ +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/SourceCodePro-LICENSE.txt b/SourceCodePro-LICENSE.txt new file mode 100644 index 000000000..07542572e --- /dev/null +++ b/SourceCodePro-LICENSE.txt @@ -0,0 +1,93 @@ +Copyright 2010, 2012 Adobe Systems Incorporated (http://www.adobe.com/), with Reserved Font Name 'Source'. All Rights Reserved. Source is a trademark of Adobe Systems Incorporated in the United States and/or other countries. + +This Font Software is licensed under the SIL Open Font License, Version 1.1. + +This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. 
The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. 
diff --git a/SourceCodePro-Regular.woff b/SourceCodePro-Regular.woff new file mode 100644 index 000000000..557667090 Binary files /dev/null and b/SourceCodePro-Regular.woff differ diff --git a/SourceCodePro-Semibold.woff b/SourceCodePro-Semibold.woff new file mode 100644 index 000000000..ca972a11d Binary files /dev/null and b/SourceCodePro-Semibold.woff differ diff --git a/SourceSerifPro-Bold.ttf.woff b/SourceSerifPro-Bold.ttf.woff new file mode 100644 index 000000000..ca254318f Binary files /dev/null and b/SourceSerifPro-Bold.ttf.woff differ diff --git a/SourceSerifPro-It.ttf.woff b/SourceSerifPro-It.ttf.woff new file mode 100644 index 000000000..a287bbe6e Binary files /dev/null and b/SourceSerifPro-It.ttf.woff differ diff --git a/SourceSerifPro-LICENSE.md b/SourceSerifPro-LICENSE.md new file mode 100644 index 000000000..22cb755f2 --- /dev/null +++ b/SourceSerifPro-LICENSE.md @@ -0,0 +1,93 @@ +Copyright 2014-2018 Adobe (http://www.adobe.com/), with Reserved Font Name 'Source'. All Rights Reserved. Source is a trademark of Adobe in the United States and/or other countries. + +This Font Software is licensed under the SIL Open Font License, Version 1.1. + +This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. 
+ +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/SourceSerifPro-Regular.ttf.woff b/SourceSerifPro-Regular.ttf.woff new file mode 100644 index 000000000..a3d55cfdf Binary files /dev/null and b/SourceSerifPro-Regular.ttf.woff differ diff --git a/brush.svg b/brush.svg new file mode 100644 index 000000000..ea266e856 --- /dev/null +++ b/brush.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/cfg_if/all.html b/cfg_if/all.html new file mode 100644 index 000000000..9cf0d3aff --- /dev/null +++ b/cfg_if/all.html @@ -0,0 +1,3 @@ +List of all items in this crate

+List of all items

Macros

\ No newline at end of file diff --git a/cfg_if/index.html b/cfg_if/index.html new file mode 100644 index 000000000..12d0acc29 --- /dev/null +++ b/cfg_if/index.html @@ -0,0 +1,22 @@ +cfg_if - Rust

Crate cfg_if

A macro for defining #[cfg] if-else statements.

+

The macro provided by this crate, cfg_if, is similar to the if/elif C +preprocessor macro by allowing definition of a cascade of #[cfg] cases, +emitting the implementation which matches first.

+

This allows you to conveniently provide a long list of #[cfg]'d blocks of code +without having to rewrite each clause multiple times.

+

Example

+
+cfg_if::cfg_if! {
+    if #[cfg(unix)] {
+        fn foo() { /* unix specific functionality */ }
+    } else if #[cfg(target_pointer_width = "32")] {
+        fn foo() { /* non-unix, 32-bit functionality */ }
+    } else {
+        fn foo() { /* fallback implementation */ }
+    }
+}
+
+
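Conceptually, the invocation above expands to one #[cfg]-guarded item per
branch, where each later branch is additionally guarded by the negation of
every earlier condition. A minimal sketch of that expansion (simplified from
the macro's internal all()/not(any()) form, using the foo stubs from the
example):

    // Sketch only: the real expansion nests the guards via not(any(...)),
    // but the selected item is the same.
    #[cfg(unix)]
    fn foo() { /* unix specific functionality */ }

    #[cfg(all(not(unix), target_pointer_width = "32"))]
    fn foo() { /* non-unix, 32-bit functionality */ }

    #[cfg(all(not(unix), not(target_pointer_width = "32")))]
    fn foo() { /* fallback implementation */ }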

Macros

+
cfg_if

The main macro provided by this crate. See crate documentation for more +information.

+
\ No newline at end of file diff --git a/cfg_if/macro.cfg_if!.html b/cfg_if/macro.cfg_if!.html new file mode 100644 index 000000000..36f9a6d55 --- /dev/null +++ b/cfg_if/macro.cfg_if!.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to macro.cfg_if.html...

+ + + \ No newline at end of file diff --git a/cfg_if/macro.cfg_if.html b/cfg_if/macro.cfg_if.html new file mode 100644 index 000000000..4337233be --- /dev/null +++ b/cfg_if/macro.cfg_if.html @@ -0,0 +1,20 @@ +cfg_if::cfg_if - Rust

Macro cfg_if::cfg_if

+macro_rules! cfg_if {
+    ($(
+        if #[cfg($($meta:meta),*)] { $($tokens:tt)* }
+    ) else * else {
+        $($tokens2:tt)*
+    }) => { ... };
+    (
+        if #[cfg($($i_met:meta),*)] { $($i_tokens:tt)* }
+        $(
+            else if #[cfg($($e_met:meta),*)] { $($e_tokens:tt)* }
+        )*
+    ) => { ... };
+    (@__items ($($not:meta,)*) ; ) => { ... };
+    (@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($tokens:tt)*) ), $($rest:tt)*) => { ... };
+    (@__identity $($tokens:tt)*) => { ... };
+}
+

The main macro provided by this crate. See crate documentation for more +information.

+
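Of the arms shown above, the first public arm requires a trailing else block,
while the second accepts an if/else-if chain with no trailing else, emitting
nothing when no condition matches; the @__items and @__identity arms are
internal plumbing. A small usage sketch of the else-less form (the feature
names here are illustrative, not part of the crate):

    cfg_if::cfg_if! {
        if #[cfg(feature = "simd")] {
            fn backend() -> &'static str { "simd" }
        } else if #[cfg(feature = "scalar")] {
            fn backend() -> &'static str { "scalar" }
        }
    }
    // With neither feature enabled, no `backend` item is emitted at all.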
\ No newline at end of file diff --git a/cfg_if/sidebar-items.js b/cfg_if/sidebar-items.js new file mode 100644 index 000000000..dbc0f919b --- /dev/null +++ b/cfg_if/sidebar-items.js @@ -0,0 +1 @@ +initSidebarItems({"macro":[["cfg_if","The main macro provided by this crate. See crate documentation for more information."]]}); \ No newline at end of file diff --git a/dark.css b/dark.css new file mode 100644 index 000000000..5df3fef0b --- /dev/null +++ b/dark.css @@ -0,0 +1 @@ +body{background-color:#353535;color:#ddd;}h1,h2,h3:not(.impl):not(.method):not(.type):not(.tymethod),h4:not(.method):not(.type):not(.tymethod){color:#ddd;}h1.fqn{border-bottom-color:#d2d2d2;}h2,h3:not(.impl):not(.method):not(.type):not(.tymethod),h4:not(.method):not(.type):not(.tymethod){border-bottom-color:#d2d2d2;}.in-band{background-color:#353535;}.invisible{background:rgba(0,0,0,0);}.docblock code,.docblock-short code{background-color:#2A2A2A;}pre{background-color:#2A2A2A;}.sidebar{background-color:#505050;}*{scrollbar-color:rgb(64,65,67) #717171;}.sidebar{scrollbar-color:rgba(32,34,37,.6) transparent;}::-webkit-scrollbar-track{background-color:#717171;}::-webkit-scrollbar-thumb{background-color:rgba(32,34,37,.6);}.sidebar::-webkit-scrollbar-track{background-color:#717171;}.sidebar::-webkit-scrollbar-thumb{background-color:rgba(32,34,37,.6);}.sidebar .current{background-color:#333;}.source .sidebar{background-color:#353535;}.sidebar .location{border-color:#fff;background:#575757;color:#DDD;}.sidebar .version{border-bottom-color:#DDD;}.sidebar-title{border-top-color:#777;border-bottom-color:#777;}.block a:hover{background:#444;}.line-numbers span{color:#3B91E2;}.line-numbers .line-highlighted{background-color:#0a042f !important;}.docblock h1,.docblock h2,.docblock h3,.docblock h4,.docblock h5{border-bottom-color:#DDD;}.docblock table,.docblock table td,.docblock table th{border-color:#ddd;}.content .method .where,.content .fn .where,.content .where.fmt-newline{color:#ddd;}.content .highlighted{color:#eee !important;background-color:#616161;}.content .highlighted a,.content .highlighted span{color:#eee !important;}.content .highlighted.trait{background-color:#013191;}.content .highlighted.traitalias{background-color:#013191;}.content .highlighted.mod,.content .highlighted.externcrate{background-color:#afc6e4;}.content .highlighted.mod{background-color:#803a1b;}.content .highlighted.externcrate{background-color:#396bac;}.content .highlighted.enum{background-color:#5b4e68;}.content .highlighted.struct{background-color:#194e9f;}.content .highlighted.union{background-color:#b7bd49;}.content .highlighted.fn,.content .highlighted.method,.content .highlighted.tymethod{background-color:#4950ed;}.content .highlighted.type{background-color:#38902c;}.content .highlighted.foreigntype{background-color:#b200d6;}.content .highlighted.attr,.content .highlighted.derive,.content .highlighted.macro{background-color:#217d1c;}.content .highlighted.constant,.content .highlighted.static{background-color:#0063cc;}.content .highlighted.primitive{background-color:#00708a;}.content .highlighted.keyword{background-color:#884719;}.content .stability::before{color:#ccc;}.content span.enum,.content a.enum,.block a.current.enum{color:#82b089;}.content span.struct,.content a.struct,.block a.current.struct{color:#2dbfb8;}.content span.type,.content a.type,.block a.current.type{color:#ff7f00;}.content span.foreigntype,.content a.foreigntype,.block a.current.foreigntype{color:#dd7de8;}.content span.attr,.content a.attr,.block a.current.attr,.content 
span.derive,.content a.derive,.block a.current.derive,.content span.macro,.content a.macro,.block a.current.macro{color:#09bd00;}.content span.union,.content a.union,.block a.current.union{color:#a6ae37;}.content span.constant,.content a.constant,.block a.current.constant,.content span.static,.content a.static,.block a.current.static{color:#82a5c9;}.content span.primitive,.content a.primitive,.block a.current.primitive{color:#43aec7;}.content span.externcrate,.content span.mod,.content a.mod,.block a.current.mod{color:#bda000;}.content span.trait,.content a.trait,.block a.current.trait{color:#b78cf2;}.content span.traitalias,.content a.traitalias,.block a.current.traitalias{color:#b397da;}.content span.fn,.content a.fn,.block a.current.fn,.content span.method,.content a.method,.block a.current.method,.content span.tymethod,.content a.tymethod,.block a.current.tymethod,.content .fnname{color:#2BAB63;}.content span.keyword,.content a.keyword,.block a.current.keyword{color:#de5249;}pre.rust .comment{color:#8d8d8b;}pre.rust .doccomment{color:#8ca375;}nav:not(.sidebar){border-bottom-color:#4e4e4e;}nav.main .current{border-top-color:#eee;border-bottom-color:#eee;}nav.main .separator{border-color:#eee;}a{color:#ddd;}.docblock:not(.type-decl) a:not(.srclink):not(.test-arrow),.docblock-short a:not(.srclink):not(.test-arrow),.stability a{color:#D2991D;}.stab.internal a{color:#304FFE;}a.test-arrow{color:#dedede;}.collapse-toggle{color:#999;}#crate-search{color:#111;background-color:#f0f0f0;border-color:#000;box-shadow:0 0 0 1px #000,0 0 0 2px transparent;}.search-input{color:#111;background-color:#f0f0f0;box-shadow:0 0 0 1px #000,0 0 0 2px transparent;}.search-input:focus{border-color:#008dfd;}.search-focus:disabled{background-color:#c5c4c4;}#crate-search+.search-input:focus{box-shadow:0 0 8px 4px #078dd8;}.module-item .stab{color:#ddd;}.stab.unstable{background:#FFF5D6;border-color:#FFC600;color:#2f2f2f;}.stab.internal{background:#FFB9B3;border-color:#B71C1C;color:#2f2f2f;}.stab.deprecated{background:#F3DFFF;border-color:#7F0087;color:#2f2f2f;}.stab.portability{background:#C4ECFF;border-color:#7BA5DB;color:#2f2f2f;}.stab.portability>code{color:#ddd;}#help>div{background:#4d4d4d;border-color:#bfbfbf;}#help dt{border-color:#bfbfbf;background:rgba(0,0,0,0);color:black;}.since{color:grey;}tr.result span.primitive::after,tr.result span.keyword::after{color:#ddd;}.line-numbers :target{background-color:transparent;}pre.rust .kw{color:#ab8ac1;}pre.rust .kw-2,pre.rust .prelude-ty{color:#769acb;}pre.rust .number,pre.rust .string{color:#83a300;}pre.rust .self,pre.rust .bool-val,pre.rust .prelude-val,pre.rust .attribute,pre.rust .attribute .ident{color:#ee6868;}pre.rust .macro,pre.rust .macro-nonterminal{color:#3E999F;}pre.rust .lifetime{color:#d97f26;}pre.rust .question-mark{color:#ff9011;}.example-wrap>pre.line-number{border-color:#4a4949;}a.test-arrow{background-color:rgba(78,139,202,0.2);}a.test-arrow:hover{background-color:#4e8bca;}.toggle-label{color:#999;}:target>code,:target>.in-band{background-color:#494a3d;}pre.compile_fail{border-left:2px solid rgba(255,0,0,.8);}pre.compile_fail:hover,.information:hover+pre.compile_fail{border-left:2px solid #f00;}pre.should_panic{border-left:2px solid rgba(255,0,0,.8);}pre.should_panic:hover,.information:hover+pre.should_panic{border-left:2px solid #f00;}pre.ignore{border-left:2px solid rgba(255,142,0,.6);}pre.ignore:hover,.information:hover+pre.ignore{border-left:2px solid 
#ff9200;}.tooltip.compile_fail{color:rgba(255,0,0,.8);}.information>.compile_fail:hover{color:#f00;}.tooltip.should_panic{color:rgba(255,0,0,.8);}.information>.should_panic:hover{color:#f00;}.tooltip.ignore{color:rgba(255,142,0,.6);}.information>.ignore:hover{color:#ff9200;}.search-failed a{color:#0089ff;}.tooltip .tooltiptext{background-color:#000;color:#fff;border-color:#000;}.tooltip .tooltiptext::after{border-color:transparent black transparent transparent;}#titles>div:not(.selected){background-color:#252525;border-top-color:#252525;}#titles>div:hover,#titles>div.selected{border-top-color:#0089ff;}#titles>div>div.count{color:#888;}@media (max-width:700px){.sidebar-menu{background-color:#505050;border-bottom-color:#e0e0e0;border-right-color:#e0e0e0;}.sidebar-elems{background-color:#505050;border-right-color:#000;}#sidebar-filler{background-color:#505050;border-bottom-color:#e0e0e0;}}kbd{color:#000;background-color:#fafbfc;border-color:#d1d5da;border-bottom-color:#c6cbd1;box-shadow-color:#c6cbd1;}#theme-picker,#settings-menu{border-color:#e0e0e0;background:#f0f0f0;}#theme-picker:hover,#theme-picker:focus,#settings-menu:hover,#settings-menu:focus{border-color:#ffb900;}#theme-choices{border-color:#e0e0e0;background-color:#353535;}#theme-choices>button:not(:first-child){border-top-color:#e0e0e0;}#theme-choices>button:hover,#theme-choices>button:focus{background-color:#4e4e4e;}@media (max-width:700px){#theme-picker{background:#f0f0f0;}}#all-types{background-color:#505050;}#all-types:hover{background-color:#606060;}.search-results td span.alias{color:#fff;}.search-results td span.grey{color:#ccc;}#sidebar-toggle{background-color:#565656;}#sidebar-toggle:hover{background-color:#676767;}#source-sidebar{background-color:#565656;}#source-sidebar>.title{border-bottom-color:#ccc;}div.files>a:hover,div.name:hover{background-color:#444;}div.files>.selected{background-color:#333;}.setting-line>.title{border-bottom-color:#ddd;} \ No newline at end of file diff --git a/down-arrow.svg b/down-arrow.svg new file mode 100644 index 000000000..35437e77a --- /dev/null +++ b/down-arrow.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/favicon.ico b/favicon.ico new file mode 100644 index 000000000..b8ad23769 Binary files /dev/null and b/favicon.ico differ diff --git a/implementors/core/clone/trait.Clone.js b/implementors/core/clone/trait.Clone.js new file mode 100644 index 000000000..0827332b0 --- /dev/null +++ b/implementors/core/clone/trait.Clone.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl<A: Clone + SimdArray> Clone for Simd<A>","synthetic":false,"types":["packed_simd::Simd"]},{"text":"impl<T: Clone> Clone for LexicographicallyOrdered<T>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Clone for m8","synthetic":false,"types":["packed_simd::masks::m8"]},{"text":"impl Clone for m16","synthetic":false,"types":["packed_simd::masks::m16"]},{"text":"impl Clone for m32","synthetic":false,"types":["packed_simd::masks::m32"]},{"text":"impl Clone for m64","synthetic":false,"types":["packed_simd::masks::m64"]},{"text":"impl Clone for m128","synthetic":false,"types":["packed_simd::masks::m128"]},{"text":"impl Clone for msize","synthetic":false,"types":["packed_simd::masks::msize"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/cmp/trait.Eq.js 
b/implementors/core/cmp/trait.Eq.js new file mode 100644 index 000000000..2bbbb9fda --- /dev/null +++ b/implementors/core/cmp/trait.Eq.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Eq for m8","synthetic":false,"types":["packed_simd::masks::m8"]},{"text":"impl Eq for m16","synthetic":false,"types":["packed_simd::masks::m16"]},{"text":"impl Eq for m32","synthetic":false,"types":["packed_simd::masks::m32"]},{"text":"impl Eq for m64","synthetic":false,"types":["packed_simd::masks::m64"]},{"text":"impl Eq for m128","synthetic":false,"types":["packed_simd::masks::m128"]},{"text":"impl Eq for msize","synthetic":false,"types":["packed_simd::masks::msize"]},{"text":"impl Eq for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Eq for LexicographicallyOrdered<i8x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Eq for LexicographicallyOrdered<u8x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl Eq for LexicographicallyOrdered<m8x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Eq for LexicographicallyOrdered<i8x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Eq for LexicographicallyOrdered<u8x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl Eq for LexicographicallyOrdered<m8x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Eq for LexicographicallyOrdered<i16x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Eq for LexicographicallyOrdered<u16x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl Eq for LexicographicallyOrdered<m16x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Eq for LexicographicallyOrdered<i8x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Eq for LexicographicallyOrdered<u8x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl Eq for LexicographicallyOrdered<m8x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Eq for LexicographicallyOrdered<i16x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Eq for 
LexicographicallyOrdered<u16x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl Eq for LexicographicallyOrdered<m16x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Eq for LexicographicallyOrdered<i32x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Eq for LexicographicallyOrdered<u32x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl Eq for LexicographicallyOrdered<m32x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Eq for LexicographicallyOrdered<i8x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Eq for LexicographicallyOrdered<u8x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl Eq for LexicographicallyOrdered<m8x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Eq for LexicographicallyOrdered<i16x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Eq for LexicographicallyOrdered<u16x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl Eq for LexicographicallyOrdered<m16x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Eq for LexicographicallyOrdered<i32x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Eq for LexicographicallyOrdered<u32x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl Eq for LexicographicallyOrdered<m32x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Eq for LexicographicallyOrdered<i64x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Eq for LexicographicallyOrdered<u64x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl Eq for LexicographicallyOrdered<m64x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Eq for 
LexicographicallyOrdered<i128x1>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Eq for LexicographicallyOrdered<u128x1>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl Eq for LexicographicallyOrdered<m128x1>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Eq for LexicographicallyOrdered<i8x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Eq for LexicographicallyOrdered<u8x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl Eq for LexicographicallyOrdered<m8x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Eq for LexicographicallyOrdered<i16x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Eq for LexicographicallyOrdered<u16x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl Eq for LexicographicallyOrdered<m16x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Eq for LexicographicallyOrdered<i32x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Eq for LexicographicallyOrdered<u32x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl Eq for LexicographicallyOrdered<m32x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Eq for LexicographicallyOrdered<i64x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Eq for LexicographicallyOrdered<u64x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl Eq for LexicographicallyOrdered<m64x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Eq for LexicographicallyOrdered<i128x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Eq for LexicographicallyOrdered<u128x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl Eq for 
LexicographicallyOrdered<m128x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Eq for LexicographicallyOrdered<i8x64>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Eq for LexicographicallyOrdered<u8x64>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl Eq for LexicographicallyOrdered<m8x64>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Eq for LexicographicallyOrdered<i16x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Eq for LexicographicallyOrdered<u16x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl Eq for LexicographicallyOrdered<m16x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Eq for LexicographicallyOrdered<i32x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Eq for LexicographicallyOrdered<u32x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl Eq for LexicographicallyOrdered<m32x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Eq for LexicographicallyOrdered<i64x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Eq for LexicographicallyOrdered<u64x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl Eq for LexicographicallyOrdered<m64x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Eq for LexicographicallyOrdered<i128x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Eq for LexicographicallyOrdered<u128x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl Eq for LexicographicallyOrdered<m128x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Eq for LexicographicallyOrdered<isizex2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Eq 
for LexicographicallyOrdered<usizex2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl Eq for LexicographicallyOrdered<msizex2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Eq for LexicographicallyOrdered<isizex4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Eq for LexicographicallyOrdered<usizex4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl Eq for LexicographicallyOrdered<msizex4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Eq for LexicographicallyOrdered<isizex8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl Eq for LexicographicallyOrdered<usizex8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Eq for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]},{"text":"impl Eq for LexicographicallyOrdered<msizex8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl<T> Eq for cptrx2<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx2"]},{"text":"impl<T> Eq for LexicographicallyOrdered<cptrx2<T>>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl<T> Eq for mptrx2<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx2"]},{"text":"impl<T> Eq for LexicographicallyOrdered<mptrx2<T>>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl<T> Eq for cptrx4<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx4"]},{"text":"impl<T> Eq for LexicographicallyOrdered<cptrx4<T>>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl<T> Eq for mptrx4<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx4"]},{"text":"impl<T> Eq for LexicographicallyOrdered<mptrx4<T>>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl<T> Eq for cptrx8<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx8"]},{"text":"impl<T> Eq for LexicographicallyOrdered<cptrx8<T>>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl<T> Eq for mptrx8<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx8"]},{"text":"impl<T> Eq for LexicographicallyOrdered<mptrx8<T>>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/cmp/trait.Ord.js b/implementors/core/cmp/trait.Ord.js new file mode 100644 index 000000000..a9adc6c26 --- /dev/null +++ b/implementors/core/cmp/trait.Ord.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Ord for m8","synthetic":false,"types":["packed_simd::masks::m8"]},{"text":"impl Ord for m16","synthetic":false,"types":["packed_simd::masks::m16"]},{"text":"impl 
Ord for m32","synthetic":false,"types":["packed_simd::masks::m32"]},{"text":"impl Ord for m64","synthetic":false,"types":["packed_simd::masks::m64"]},{"text":"impl Ord for m128","synthetic":false,"types":["packed_simd::masks::m128"]},{"text":"impl Ord for msize","synthetic":false,"types":["packed_simd::masks::msize"]},{"text":"impl Ord for LexicographicallyOrdered<i8x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u8x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m8x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i8x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u8x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m8x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i16x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u16x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m16x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i8x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u8x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m8x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i16x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u16x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m16x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i32x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u32x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m32x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i8x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u8x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m8x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i16x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u16x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m16x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i32x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u32x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord 
for LexicographicallyOrdered<m32x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i64x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u64x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m64x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i128x1>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u128x1>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m128x1>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i8x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u8x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m8x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i16x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u16x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m16x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i32x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u32x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m32x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i64x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u64x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m64x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i128x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u128x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m128x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i8x64>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u8x64>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m8x64>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i16x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u16x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m16x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for 
LexicographicallyOrdered<i32x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u32x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m32x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i64x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u64x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m64x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<i128x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<u128x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<m128x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<isizex2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<usizex2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<msizex2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<isizex4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<usizex4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<msizex4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<isizex8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<usizex8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Ord for LexicographicallyOrdered<msizex8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/cmp/trait.PartialEq.js b/implementors/core/cmp/trait.PartialEq.js new file mode 100644 index 000000000..1452c32f3 --- /dev/null +++ b/implementors/core/cmp/trait.PartialEq.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl PartialEq<m8> for m8","synthetic":false,"types":["packed_simd::masks::m8"]},{"text":"impl PartialEq<m16> for m16","synthetic":false,"types":["packed_simd::masks::m16"]},{"text":"impl PartialEq<m32> for m32","synthetic":false,"types":["packed_simd::masks::m32"]},{"text":"impl PartialEq<m64> for m64","synthetic":false,"types":["packed_simd::masks::m64"]},{"text":"impl PartialEq<m128> for m128","synthetic":false,"types":["packed_simd::masks::m128"]},{"text":"impl PartialEq<msize> for msize","synthetic":false,"types":["packed_simd::masks::msize"]},{"text":"impl PartialEq<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i8; 2]>>> for LexicographicallyOrdered<i8x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl 
PartialEq<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u8; 2]>>> for LexicographicallyOrdered<u8x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m8; 2]>> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m8; 2]>>> for LexicographicallyOrdered<m8x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i8; 4]>>> for LexicographicallyOrdered<i8x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u8; 4]>>> for LexicographicallyOrdered<u8x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m8; 4]>> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m8; 4]>>> for LexicographicallyOrdered<m8x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i16; 2]>>> for LexicographicallyOrdered<i16x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u16; 2]>>> for LexicographicallyOrdered<u16x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m16; 2]>> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m16; 2]>>> for LexicographicallyOrdered<m16x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i8; 8]>>> for LexicographicallyOrdered<i8x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u8; 8]>>> for LexicographicallyOrdered<u8x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m8; 8]>> for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m8; 8]>>> for LexicographicallyOrdered<m8x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i16; 4]>>> for LexicographicallyOrdered<i16x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u16; 4]>>> for 
LexicographicallyOrdered<u16x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m16; 4]>> for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m16; 4]>>> for LexicographicallyOrdered<m16x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i32; 2]>>> for LexicographicallyOrdered<i32x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u32; 2]>>> for LexicographicallyOrdered<u32x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m32; 2]>> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m32; 2]>>> for LexicographicallyOrdered<m32x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[f32; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[f32; 2]>>> for LexicographicallyOrdered<f32x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i8; 16]>>> for LexicographicallyOrdered<i8x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u8; 16]>>> for LexicographicallyOrdered<u8x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m8; 16]>> for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m8; 16]>>> for LexicographicallyOrdered<m8x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i16; 8]>>> for LexicographicallyOrdered<i16x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u16; 8]>>> for LexicographicallyOrdered<u16x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m16; 8]>> for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m16; 8]>>> for LexicographicallyOrdered<m16x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i32; 4]>>> for LexicographicallyOrdered<i32x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u32; 4]>> for 
u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u32; 4]>>> for LexicographicallyOrdered<u32x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[f32; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[f32; 4]>>> for LexicographicallyOrdered<f32x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m32; 4]>> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m32; 4]>>> for LexicographicallyOrdered<m32x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i64; 2]>>> for LexicographicallyOrdered<i64x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u64; 2]>>> for LexicographicallyOrdered<u64x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[f64; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[f64; 2]>>> for LexicographicallyOrdered<f64x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m64; 2]>> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m64; 2]>>> for LexicographicallyOrdered<m64x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i128; 1]>>> for LexicographicallyOrdered<i128x1>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u128; 1]>>> for LexicographicallyOrdered<u128x1>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m128; 1]>> for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m128; 1]>>> for LexicographicallyOrdered<m128x1>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i8; 32]>>> for LexicographicallyOrdered<i8x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u8; 32]>>> for LexicographicallyOrdered<u8x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m8; 32]>> for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m8; 32]>>> for 
LexicographicallyOrdered<m8x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i16; 16]>>> for LexicographicallyOrdered<i16x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u16; 16]>>> for LexicographicallyOrdered<u16x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m16; 16]>> for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m16; 16]>>> for LexicographicallyOrdered<m16x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i32; 8]>>> for LexicographicallyOrdered<i32x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u32; 8]>>> for LexicographicallyOrdered<u32x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[f32; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[f32; 8]>>> for LexicographicallyOrdered<f32x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m32; 8]>> for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m32; 8]>>> for LexicographicallyOrdered<m32x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i64; 4]>>> for LexicographicallyOrdered<i64x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u64; 4]>>> for LexicographicallyOrdered<u64x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[f64; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[f64; 4]>>> for LexicographicallyOrdered<f64x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m64; 4]>> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m64; 4]>>> for LexicographicallyOrdered<m64x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i128; 2]>>> for LexicographicallyOrdered<i128x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u128; 2]>> for 
u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u128; 2]>>> for LexicographicallyOrdered<u128x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m128; 2]>> for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m128; 2]>>> for LexicographicallyOrdered<m128x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i8; 64]>>> for LexicographicallyOrdered<i8x64>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u8; 64]>>> for LexicographicallyOrdered<u8x64>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m8; 64]>> for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m8; 64]>>> for LexicographicallyOrdered<m8x64>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i16; 32]>>> for LexicographicallyOrdered<i16x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u16; 32]>>> for LexicographicallyOrdered<u16x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m16; 32]>> for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m16; 32]>>> for LexicographicallyOrdered<m16x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i32; 16]>>> for LexicographicallyOrdered<i32x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u32; 16]>>> for LexicographicallyOrdered<u32x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[f32; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[f32; 16]>>> for LexicographicallyOrdered<f32x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m32; 16]>> for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m32; 16]>>> for LexicographicallyOrdered<m32x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i64; 8]>>> for 
LexicographicallyOrdered<i64x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u64; 8]>>> for LexicographicallyOrdered<u64x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[f64; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[f64; 8]>>> for LexicographicallyOrdered<f64x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m64; 8]>> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m64; 8]>>> for LexicographicallyOrdered<m64x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[i128; 4]>>> for LexicographicallyOrdered<i128x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[u128; 4]>>> for LexicographicallyOrdered<u128x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[m128; 4]>> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[m128; 4]>>> for LexicographicallyOrdered<m128x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[isize; 2]>>> for LexicographicallyOrdered<isizex2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[usize; 2]>>> for LexicographicallyOrdered<usizex2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[msize; 2]>> for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[msize; 2]>>> for LexicographicallyOrdered<msizex2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[isize; 4]>>> for LexicographicallyOrdered<isizex4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[usize; 4]>>> for LexicographicallyOrdered<usizex4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[msize; 4]>> for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[msize; 4]>>> for LexicographicallyOrdered<msizex4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl 
PartialEq<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[isize; 8]>>> for LexicographicallyOrdered<isizex8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[usize; 8]>>> for LexicographicallyOrdered<usizex8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialEq<Simd<[msize; 8]>> for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]},{"text":"impl PartialEq<LexicographicallyOrdered<Simd<[msize; 8]>>> for LexicographicallyOrdered<msizex8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl<T> PartialEq<Simd<[*const T; 2]>> for cptrx2<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx2"]},{"text":"impl<T> PartialEq<LexicographicallyOrdered<Simd<[*const T; 2]>>> for LexicographicallyOrdered<cptrx2<T>>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl<T> PartialEq<Simd<[*mut T; 2]>> for mptrx2<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx2"]},{"text":"impl<T> PartialEq<LexicographicallyOrdered<Simd<[*mut T; 2]>>> for LexicographicallyOrdered<mptrx2<T>>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl<T> PartialEq<Simd<[*const T; 4]>> for cptrx4<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx4"]},{"text":"impl<T> PartialEq<LexicographicallyOrdered<Simd<[*const T; 4]>>> for LexicographicallyOrdered<cptrx4<T>>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl<T> PartialEq<Simd<[*mut T; 4]>> for mptrx4<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx4"]},{"text":"impl<T> PartialEq<LexicographicallyOrdered<Simd<[*mut T; 4]>>> for LexicographicallyOrdered<mptrx4<T>>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl<T> PartialEq<Simd<[*const T; 8]>> for cptrx8<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx8"]},{"text":"impl<T> PartialEq<LexicographicallyOrdered<Simd<[*const T; 8]>>> for LexicographicallyOrdered<cptrx8<T>>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl<T> PartialEq<Simd<[*mut T; 8]>> for mptrx8<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx8"]},{"text":"impl<T> PartialEq<LexicographicallyOrdered<Simd<[*mut T; 8]>>> for LexicographicallyOrdered<mptrx8<T>>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/cmp/trait.PartialOrd.js b/implementors/core/cmp/trait.PartialOrd.js new file mode 100644 index 000000000..9ca955116 --- /dev/null +++ b/implementors/core/cmp/trait.PartialOrd.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl PartialOrd<m8> for m8","synthetic":false,"types":["packed_simd::masks::m8"]},{"text":"impl PartialOrd<m16> for m16","synthetic":false,"types":["packed_simd::masks::m16"]},{"text":"impl PartialOrd<m32> for m32","synthetic":false,"types":["packed_simd::masks::m32"]},{"text":"impl PartialOrd<m64> for m64","synthetic":false,"types":["packed_simd::masks::m64"]},{"text":"impl PartialOrd<m128> for 
m128","synthetic":false,"types":["packed_simd::masks::m128"]},{"text":"impl PartialOrd<msize> for msize","synthetic":false,"types":["packed_simd::masks::msize"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i8; 2]>>> for LexicographicallyOrdered<i8x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u8; 2]>>> for LexicographicallyOrdered<u8x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m8; 2]>>> for LexicographicallyOrdered<m8x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i8; 4]>>> for LexicographicallyOrdered<i8x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u8; 4]>>> for LexicographicallyOrdered<u8x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m8; 4]>>> for LexicographicallyOrdered<m8x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i16; 2]>>> for LexicographicallyOrdered<i16x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u16; 2]>>> for LexicographicallyOrdered<u16x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m16; 2]>>> for LexicographicallyOrdered<m16x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i8; 8]>>> for LexicographicallyOrdered<i8x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u8; 8]>>> for LexicographicallyOrdered<u8x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m8; 8]>>> for LexicographicallyOrdered<m8x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i16; 4]>>> for LexicographicallyOrdered<i16x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u16; 4]>>> for LexicographicallyOrdered<u16x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m16; 4]>>> for LexicographicallyOrdered<m16x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i32; 2]>>> for LexicographicallyOrdered<i32x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u32; 2]>>> for LexicographicallyOrdered<u32x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m32; 2]>>> for LexicographicallyOrdered<m32x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i8; 16]>>> for LexicographicallyOrdered<i8x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u8; 16]>>> for 
LexicographicallyOrdered<u8x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m8; 16]>>> for LexicographicallyOrdered<m8x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i16; 8]>>> for LexicographicallyOrdered<i16x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u16; 8]>>> for LexicographicallyOrdered<u16x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m16; 8]>>> for LexicographicallyOrdered<m16x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i32; 4]>>> for LexicographicallyOrdered<i32x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u32; 4]>>> for LexicographicallyOrdered<u32x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m32; 4]>>> for LexicographicallyOrdered<m32x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i64; 2]>>> for LexicographicallyOrdered<i64x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u64; 2]>>> for LexicographicallyOrdered<u64x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m64; 2]>>> for LexicographicallyOrdered<m64x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i128; 1]>>> for LexicographicallyOrdered<i128x1>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u128; 1]>>> for LexicographicallyOrdered<u128x1>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m128; 1]>>> for LexicographicallyOrdered<m128x1>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i8; 32]>>> for LexicographicallyOrdered<i8x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u8; 32]>>> for LexicographicallyOrdered<u8x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m8; 32]>>> for LexicographicallyOrdered<m8x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i16; 16]>>> for LexicographicallyOrdered<i16x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u16; 16]>>> for LexicographicallyOrdered<u16x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m16; 16]>>> for LexicographicallyOrdered<m16x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i32; 8]>>> for 
LexicographicallyOrdered<i32x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u32; 8]>>> for LexicographicallyOrdered<u32x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m32; 8]>>> for LexicographicallyOrdered<m32x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i64; 4]>>> for LexicographicallyOrdered<i64x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u64; 4]>>> for LexicographicallyOrdered<u64x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m64; 4]>>> for LexicographicallyOrdered<m64x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i128; 2]>>> for LexicographicallyOrdered<i128x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u128; 2]>>> for LexicographicallyOrdered<u128x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m128; 2]>>> for LexicographicallyOrdered<m128x2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i8; 64]>>> for LexicographicallyOrdered<i8x64>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u8; 64]>>> for LexicographicallyOrdered<u8x64>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m8; 64]>>> for LexicographicallyOrdered<m8x64>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i16; 32]>>> for LexicographicallyOrdered<i16x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u16; 32]>>> for LexicographicallyOrdered<u16x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m16; 32]>>> for LexicographicallyOrdered<m16x32>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i32; 16]>>> for LexicographicallyOrdered<i32x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u32; 16]>>> for LexicographicallyOrdered<u32x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m32; 16]>>> for LexicographicallyOrdered<m32x16>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i64; 8]>>> for LexicographicallyOrdered<i64x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u64; 8]>>> for LexicographicallyOrdered<u64x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m64; 8]>>> for 
LexicographicallyOrdered<m64x8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[i128; 4]>>> for LexicographicallyOrdered<i128x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[u128; 4]>>> for LexicographicallyOrdered<u128x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[m128; 4]>>> for LexicographicallyOrdered<m128x4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[isize; 2]>>> for LexicographicallyOrdered<isizex2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[usize; 2]>>> for LexicographicallyOrdered<usizex2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[msize; 2]>>> for LexicographicallyOrdered<msizex2>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[isize; 4]>>> for LexicographicallyOrdered<isizex4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[usize; 4]>>> for LexicographicallyOrdered<usizex4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[msize; 4]>>> for LexicographicallyOrdered<msizex4>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[isize; 8]>>> for LexicographicallyOrdered<isizex8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[usize; 8]>>> for LexicographicallyOrdered<usizex8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl PartialOrd<LexicographicallyOrdered<Simd<[msize; 8]>>> for LexicographicallyOrdered<msizex8>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/convert/trait.From.js b/implementors/core/convert/trait.From.js new file mode 100644 index 000000000..b6448f4a1 --- /dev/null +++ b/implementors/core/convert/trait.From.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl From<[i8; 2]> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl From<Simd<[i8; 2]>> for [i8; 2]","synthetic":false,"types":[]},{"text":"impl From<[u8; 2]> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl From<Simd<[u8; 2]>> for [u8; 2]","synthetic":false,"types":[]},{"text":"impl From<[m8; 2]> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl From<Simd<[m8; 2]>> for [m8; 2]","synthetic":false,"types":["packed_simd::masks::m8"]},{"text":"impl From<Simd<[m16; 2]>> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl From<Simd<[m32; 2]>> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl From<Simd<[m64; 2]>> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl From<Simd<[m128; 2]>> for 
m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl From<[i8; 4]> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl From<Simd<[i8; 4]>> for [i8; 4]","synthetic":false,"types":[]},{"text":"impl From<[u8; 4]> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl From<Simd<[u8; 4]>> for [u8; 4]","synthetic":false,"types":[]},{"text":"impl From<[m8; 4]> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl From<Simd<[m8; 4]>> for [m8; 4]","synthetic":false,"types":["packed_simd::masks::m8"]},{"text":"impl From<Simd<[m16; 4]>> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl From<Simd<[m32; 4]>> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl From<Simd<[m64; 4]>> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl From<[i16; 2]> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl From<Simd<[i16; 2]>> for [i16; 2]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl From<Simd<[u8; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl From<[u16; 2]> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl From<Simd<[u16; 2]>> for [u16; 2]","synthetic":false,"types":[]},{"text":"impl From<Simd<[u8; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl From<[m16; 2]> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl From<Simd<[m16; 2]>> for [m16; 2]","synthetic":false,"types":["packed_simd::masks::m16"]},{"text":"impl From<Simd<[m8; 2]>> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl From<Simd<[m32; 2]>> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl From<Simd<[m64; 2]>> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl From<Simd<[m128; 2]>> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl From<[i8; 8]> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl From<Simd<[i8; 8]>> for [i8; 8]","synthetic":false,"types":[]},{"text":"impl From<[u8; 8]> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl From<Simd<[u8; 8]>> for [u8; 8]","synthetic":false,"types":[]},{"text":"impl From<[m8; 8]> for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl From<Simd<[m8; 8]>> for [m8; 8]","synthetic":false,"types":["packed_simd::masks::m8"]},{"text":"impl From<Simd<[m16; 8]>> for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl From<Simd<[m32; 8]>> for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl From<[i16; 4]> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl From<Simd<[i16; 4]>> for [i16; 4]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl From<Simd<[u8; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl From<[u16; 4]> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl From<Simd<[u16; 4]>> for [u16; 4]","synthetic":false,"types":[]},{"text":"impl From<Simd<[u8; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl From<[m16; 4]> for 
m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl From<Simd<[m16; 4]>> for [m16; 4]","synthetic":false,"types":["packed_simd::masks::m16"]},{"text":"impl From<Simd<[m8; 4]>> for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl From<Simd<[m32; 4]>> for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl From<Simd<[m64; 4]>> for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl From<[i32; 2]> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl From<Simd<[i32; 2]>> for [i32; 2]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl From<Simd<[u8; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl From<Simd<[i16; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl From<Simd<[u16; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl From<[u32; 2]> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl From<Simd<[u32; 2]>> for [u32; 2]","synthetic":false,"types":[]},{"text":"impl From<Simd<[u8; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl From<Simd<[u16; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl From<[m32; 2]> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl From<Simd<[m32; 2]>> for [m32; 2]","synthetic":false,"types":["packed_simd::masks::m32"]},{"text":"impl From<Simd<[m8; 2]>> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl From<Simd<[m16; 2]>> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl From<Simd<[m64; 2]>> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl From<Simd<[m128; 2]>> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl From<[f32; 2]> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl From<Simd<[f32; 2]>> for [f32; 2]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl From<Simd<[u8; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl From<Simd<[i16; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl From<Simd<[u16; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl From<[i8; 16]> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl From<Simd<[i8; 16]>> for [i8; 16]","synthetic":false,"types":[]},{"text":"impl From<[u8; 16]> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl From<Simd<[u8; 16]>> for [u8; 16]","synthetic":false,"types":[]},{"text":"impl From<[m8; 16]> for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl From<Simd<[m8; 16]>> for [m8; 16]","synthetic":false,"types":["packed_simd::masks::m8"]},{"text":"impl From<Simd<[m16; 16]>> for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl From<[i16; 8]> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl From<Simd<[i16; 8]>> for [i16; 8]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl From<Simd<[u8; 
8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl From<[u16; 8]> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl From<Simd<[u16; 8]>> for [u16; 8]","synthetic":false,"types":[]},{"text":"impl From<Simd<[u8; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl From<[m16; 8]> for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl From<Simd<[m16; 8]>> for [m16; 8]","synthetic":false,"types":["packed_simd::masks::m16"]},{"text":"impl From<Simd<[m8; 8]>> for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl From<Simd<[m32; 8]>> for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl From<[i32; 4]> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl From<Simd<[i32; 4]>> for [i32; 4]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl From<Simd<[u8; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl From<Simd<[i16; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl From<Simd<[u16; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl From<[u32; 4]> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl From<Simd<[u32; 4]>> for [u32; 4]","synthetic":false,"types":[]},{"text":"impl From<Simd<[u8; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl From<Simd<[u16; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl From<[f32; 4]> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl From<Simd<[f32; 4]>> for [f32; 4]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl From<Simd<[u8; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl From<Simd<[i16; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl From<Simd<[u16; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl From<[m32; 4]> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl From<Simd<[m32; 4]>> for [m32; 4]","synthetic":false,"types":["packed_simd::masks::m32"]},{"text":"impl From<Simd<[m8; 4]>> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl From<Simd<[m16; 4]>> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl From<Simd<[m64; 4]>> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl From<[i64; 2]> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl From<Simd<[i64; 2]>> for [i64; 2]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl From<Simd<[u8; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl From<Simd<[i16; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl From<Simd<[u16; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl From<Simd<[i32; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl From<Simd<[u32; 2]>> for 
i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl From<[u64; 2]> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl From<Simd<[u64; 2]>> for [u64; 2]","synthetic":false,"types":[]},{"text":"impl From<Simd<[u8; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl From<Simd<[u16; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl From<Simd<[u32; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl From<[f64; 2]> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl From<Simd<[f64; 2]>> for [f64; 2]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl From<Simd<[u8; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl From<Simd<[i16; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl From<Simd<[u16; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl From<Simd<[i32; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl From<Simd<[u32; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl From<Simd<[f32; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl From<[m64; 2]> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl From<Simd<[m64; 2]>> for [m64; 2]","synthetic":false,"types":["packed_simd::masks::m64"]},{"text":"impl From<Simd<[m8; 2]>> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl From<Simd<[m16; 2]>> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl From<Simd<[m32; 2]>> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl From<Simd<[m128; 2]>> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl From<[i128; 1]> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl From<Simd<[i128; 1]>> for [i128; 1]","synthetic":false,"types":[]},{"text":"impl From<[u128; 1]> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl From<Simd<[u128; 1]>> for [u128; 1]","synthetic":false,"types":[]},{"text":"impl From<[m128; 1]> for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl From<Simd<[m128; 1]>> for [m128; 1]","synthetic":false,"types":["packed_simd::masks::m128"]},{"text":"impl From<[i8; 32]> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl From<Simd<[i8; 32]>> for [i8; 32]","synthetic":false,"types":[]},{"text":"impl From<[u8; 32]> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl From<Simd<[u8; 32]>> for [u8; 32]","synthetic":false,"types":[]},{"text":"impl From<[m8; 32]> for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl From<Simd<[m8; 32]>> for [m8; 32]","synthetic":false,"types":["packed_simd::masks::m8"]},{"text":"impl From<[i16; 16]> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl From<Simd<[i16; 16]>> for [i16; 16]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl From<Simd<[u8; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl 
From<[u16; 16]> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl From<Simd<[u16; 16]>> for [u16; 16]","synthetic":false,"types":[]},{"text":"impl From<Simd<[u8; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl From<[m16; 16]> for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl From<Simd<[m16; 16]>> for [m16; 16]","synthetic":false,"types":["packed_simd::masks::m16"]},{"text":"impl From<Simd<[m8; 16]>> for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl From<[i32; 8]> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl From<Simd<[i32; 8]>> for [i32; 8]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl From<Simd<[u8; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl From<Simd<[i16; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl From<Simd<[u16; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl From<[u32; 8]> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl From<Simd<[u32; 8]>> for [u32; 8]","synthetic":false,"types":[]},{"text":"impl From<Simd<[u8; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl From<Simd<[u16; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl From<[f32; 8]> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl From<Simd<[f32; 8]>> for [f32; 8]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl From<Simd<[u8; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl From<Simd<[i16; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl From<Simd<[u16; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl From<[m32; 8]> for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl From<Simd<[m32; 8]>> for [m32; 8]","synthetic":false,"types":["packed_simd::masks::m32"]},{"text":"impl From<Simd<[m8; 8]>> for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl From<Simd<[m16; 8]>> for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl From<[i64; 4]> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl From<Simd<[i64; 4]>> for [i64; 4]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl From<Simd<[u8; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl From<Simd<[i16; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl From<Simd<[u16; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl From<Simd<[i32; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl From<Simd<[u32; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl From<[u64; 4]> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl From<Simd<[u64; 4]>> for [u64; 4]","synthetic":false,"types":[]},{"text":"impl From<Simd<[u8; 4]>> for 
u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl From<Simd<[u16; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl From<Simd<[u32; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl From<[f64; 4]> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl From<Simd<[f64; 4]>> for [f64; 4]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl From<Simd<[u8; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl From<Simd<[i16; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl From<Simd<[u16; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl From<Simd<[i32; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl From<Simd<[u32; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl From<Simd<[f32; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl From<[m64; 4]> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl From<Simd<[m64; 4]>> for [m64; 4]","synthetic":false,"types":["packed_simd::masks::m64"]},{"text":"impl From<Simd<[m8; 4]>> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl From<Simd<[m16; 4]>> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl From<Simd<[m32; 4]>> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl From<[i128; 2]> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl From<Simd<[i128; 2]>> for [i128; 2]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl From<Simd<[u8; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl From<Simd<[i16; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl From<Simd<[u16; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl From<Simd<[i32; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl From<Simd<[u32; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl From<Simd<[i64; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl From<Simd<[u64; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl From<[u128; 2]> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl From<Simd<[u128; 2]>> for [u128; 2]","synthetic":false,"types":[]},{"text":"impl From<Simd<[u8; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl From<Simd<[u16; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl From<Simd<[u32; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl From<Simd<[u64; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl From<[m128; 2]> for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl From<Simd<[m128; 2]>> for [m128; 2]","synthetic":false,"types":["packed_simd::masks::m128"]},{"text":"impl From<Simd<[m8; 2]>> for 
m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl From<Simd<[m16; 2]>> for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl From<Simd<[m32; 2]>> for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl From<Simd<[m64; 2]>> for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl From<[i8; 64]> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl From<Simd<[i8; 64]>> for [i8; 64]","synthetic":false,"types":[]},{"text":"impl From<[u8; 64]> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl From<Simd<[u8; 64]>> for [u8; 64]","synthetic":false,"types":[]},{"text":"impl From<[m8; 64]> for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl From<Simd<[m8; 64]>> for [m8; 64]","synthetic":false,"types":["packed_simd::masks::m8"]},{"text":"impl From<[i16; 32]> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl From<Simd<[i16; 32]>> for [i16; 32]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl From<Simd<[u8; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl From<[u16; 32]> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl From<Simd<[u16; 32]>> for [u16; 32]","synthetic":false,"types":[]},{"text":"impl From<Simd<[u8; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl From<[m16; 32]> for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl From<Simd<[m16; 32]>> for [m16; 32]","synthetic":false,"types":["packed_simd::masks::m16"]},{"text":"impl From<Simd<[m8; 32]>> for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl From<[i32; 16]> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl From<Simd<[i32; 16]>> for [i32; 16]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl From<Simd<[u8; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl From<Simd<[i16; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl From<Simd<[u16; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl From<[u32; 16]> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl From<Simd<[u32; 16]>> for [u32; 16]","synthetic":false,"types":[]},{"text":"impl From<Simd<[u8; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl From<Simd<[u16; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl From<[f32; 16]> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl From<Simd<[f32; 16]>> for [f32; 16]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl From<Simd<[u8; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl From<Simd<[i16; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl From<Simd<[u16; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl From<[m32; 16]> for 
m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl From<Simd<[m32; 16]>> for [m32; 16]","synthetic":false,"types":["packed_simd::masks::m32"]},{"text":"impl From<Simd<[m8; 16]>> for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl From<Simd<[m16; 16]>> for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl From<[i64; 8]> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl From<Simd<[i64; 8]>> for [i64; 8]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl From<Simd<[u8; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl From<Simd<[i16; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl From<Simd<[u16; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl From<Simd<[i32; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl From<Simd<[u32; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl From<[u64; 8]> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl From<Simd<[u64; 8]>> for [u64; 8]","synthetic":false,"types":[]},{"text":"impl From<Simd<[u8; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl From<Simd<[u16; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl From<Simd<[u32; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl From<[f64; 8]> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl From<Simd<[f64; 8]>> for [f64; 8]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl From<Simd<[u8; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl From<Simd<[i16; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl From<Simd<[u16; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl From<Simd<[i32; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl From<Simd<[u32; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl From<Simd<[f32; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl From<[m64; 8]> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl From<Simd<[m64; 8]>> for [m64; 8]","synthetic":false,"types":["packed_simd::masks::m64"]},{"text":"impl From<Simd<[m8; 8]>> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl From<Simd<[m16; 8]>> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl From<Simd<[m32; 8]>> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl From<[i128; 4]> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl From<Simd<[i128; 4]>> for [i128; 4]","synthetic":false,"types":[]},{"text":"impl From<Simd<[i8; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl From<Simd<[u8; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl From<Simd<[i16; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl 
From<Simd<[u16; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl From<Simd<[i32; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl From<Simd<[u32; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl From<Simd<[i64; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl From<Simd<[u64; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl From<[u128; 4]> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl From<Simd<[u128; 4]>> for [u128; 4]","synthetic":false,"types":[]},{"text":"impl From<Simd<[u8; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl From<Simd<[u16; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl From<Simd<[u32; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl From<Simd<[u64; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl From<[m128; 4]> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl From<Simd<[m128; 4]>> for [m128; 4]","synthetic":false,"types":["packed_simd::masks::m128"]},{"text":"impl From<Simd<[m8; 4]>> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl From<Simd<[m16; 4]>> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl From<Simd<[m32; 4]>> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl From<Simd<[m64; 4]>> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl From<[isize; 2]> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl From<Simd<[isize; 2]>> for [isize; 2]","synthetic":false,"types":[]},{"text":"impl From<[usize; 2]> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl From<Simd<[usize; 2]>> for [usize; 2]","synthetic":false,"types":[]},{"text":"impl From<[msize; 2]> for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl From<Simd<[msize; 2]>> for [msize; 2]","synthetic":false,"types":["packed_simd::masks::msize"]},{"text":"impl From<[isize; 4]> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl From<Simd<[isize; 4]>> for [isize; 4]","synthetic":false,"types":[]},{"text":"impl From<[usize; 4]> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl From<Simd<[usize; 4]>> for [usize; 4]","synthetic":false,"types":[]},{"text":"impl From<[msize; 4]> for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl From<Simd<[msize; 4]>> for [msize; 4]","synthetic":false,"types":["packed_simd::masks::msize"]},{"text":"impl From<[isize; 8]> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl From<Simd<[isize; 8]>> for [isize; 8]","synthetic":false,"types":[]},{"text":"impl From<[usize; 8]> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl From<Simd<[usize; 8]>> for [usize; 8]","synthetic":false,"types":[]},{"text":"impl From<[msize; 8]> for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]},{"text":"impl From<Simd<[msize; 8]>> for [msize; 8]","synthetic":false,"types":["packed_simd::masks::msize"]},{"text":"impl<T> From<[*const T; 2]> for 
cptrx2<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx2"]},{"text":"impl<T> From<[*mut T; 2]> for mptrx2<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx2"]},{"text":"impl<T> From<[*const T; 4]> for cptrx4<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx4"]},{"text":"impl<T> From<[*mut T; 4]> for mptrx4<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx4"]},{"text":"impl<T> From<[*const T; 8]> for cptrx8<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx8"]},{"text":"impl<T> From<[*mut T; 8]> for mptrx8<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/convert/trait.Into.js b/implementors/core/convert/trait.Into.js new file mode 100644 index 000000000..542ac8344 --- /dev/null +++ b/implementors/core/convert/trait.Into.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl<T> Into<[*const T; 2]> for cptrx2<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx2"]},{"text":"impl<T> Into<[*mut T; 2]> for mptrx2<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx2"]},{"text":"impl<T> Into<[*const T; 4]> for cptrx4<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx4"]},{"text":"impl<T> Into<[*mut T; 4]> for mptrx4<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx4"]},{"text":"impl<T> Into<[*const T; 8]> for cptrx8<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx8"]},{"text":"impl<T> Into<[*mut T; 8]> for mptrx8<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/default/trait.Default.js b/implementors/core/default/trait.Default.js new file mode 100644 index 000000000..79b27d133 --- /dev/null +++ b/implementors/core/default/trait.Default.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Default for m8","synthetic":false,"types":["packed_simd::masks::m8"]},{"text":"impl Default for m16","synthetic":false,"types":["packed_simd::masks::m16"]},{"text":"impl Default for m32","synthetic":false,"types":["packed_simd::masks::m32"]},{"text":"impl Default for m64","synthetic":false,"types":["packed_simd::masks::m64"]},{"text":"impl Default for m128","synthetic":false,"types":["packed_simd::masks::m128"]},{"text":"impl Default for msize","synthetic":false,"types":["packed_simd::masks::msize"]},{"text":"impl Default for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Default for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Default for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl Default for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Default for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Default for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl Default for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Default for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Default for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl Default for 
i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Default for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Default for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl Default for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Default for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Default for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl Default for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Default for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Default for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl Default for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl Default for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Default for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Default for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl Default for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Default for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Default for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl Default for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Default for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Default for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl Default for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl Default for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Default for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Default for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl Default for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl Default for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Default for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Default for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl Default for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Default for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Default for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl Default for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Default for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Default for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl Default for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Default for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Default for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl Default for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl Default for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Default for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Default for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl Default 
for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl Default for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Default for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Default for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl Default for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Default for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Default for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl Default for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Default for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Default for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl Default for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Default for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Default for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl Default for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl Default for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Default for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Default for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl Default for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl Default for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Default for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Default for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl Default for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Default for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Default for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl Default for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Default for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Default for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl Default for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Default for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl Default for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]},{"text":"impl<T> Default for cptrx2<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx2"]},{"text":"impl<T> Default for mptrx2<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx2"]},{"text":"impl<T> Default for cptrx4<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx4"]},{"text":"impl<T> Default for mptrx4<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx4"]},{"text":"impl<T> Default for cptrx8<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx8"]},{"text":"impl<T> Default for mptrx8<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/fmt/trait.Binary.js 
b/implementors/core/fmt/trait.Binary.js new file mode 100644 index 000000000..b10bc5f24 --- /dev/null +++ b/implementors/core/fmt/trait.Binary.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Binary for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Binary for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Binary for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Binary for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Binary for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Binary for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Binary for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Binary for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Binary for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Binary for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Binary for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Binary for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Binary for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Binary for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Binary for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Binary for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Binary for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Binary for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Binary for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Binary for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Binary for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Binary for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Binary for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Binary for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Binary for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Binary for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Binary for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Binary for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Binary for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Binary for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Binary for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Binary for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Binary for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Binary for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Binary for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Binary for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Binary for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Binary for 
u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Binary for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Binary for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Binary for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Binary for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Binary for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Binary for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Binary for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Binary for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Binary for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Binary for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/fmt/trait.Debug.js b/implementors/core/fmt/trait.Debug.js new file mode 100644 index 000000000..c821a1415 --- /dev/null +++ b/implementors/core/fmt/trait.Debug.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl<T: Debug> Debug for LexicographicallyOrdered<T>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Debug for m8","synthetic":false,"types":["packed_simd::masks::m8"]},{"text":"impl Debug for m16","synthetic":false,"types":["packed_simd::masks::m16"]},{"text":"impl Debug for m32","synthetic":false,"types":["packed_simd::masks::m32"]},{"text":"impl Debug for m64","synthetic":false,"types":["packed_simd::masks::m64"]},{"text":"impl Debug for m128","synthetic":false,"types":["packed_simd::masks::m128"]},{"text":"impl Debug for msize","synthetic":false,"types":["packed_simd::masks::msize"]},{"text":"impl Debug for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Debug for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Debug for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl Debug for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Debug for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Debug for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl Debug for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Debug for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Debug for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl Debug for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Debug for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Debug for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl Debug for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Debug for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Debug for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl Debug for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Debug for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Debug for 
m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl Debug for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl Debug for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Debug for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Debug for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl Debug for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Debug for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Debug for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl Debug for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Debug for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Debug for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl Debug for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl Debug for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Debug for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Debug for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl Debug for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl Debug for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Debug for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Debug for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl Debug for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Debug for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Debug for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl Debug for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Debug for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Debug for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl Debug for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Debug for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Debug for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl Debug for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl Debug for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Debug for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Debug for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl Debug for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl Debug for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Debug for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Debug for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl Debug for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Debug for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Debug for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl Debug for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Debug for 
u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Debug for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl Debug for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Debug for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Debug for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl Debug for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl Debug for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Debug for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Debug for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl Debug for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl Debug for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Debug for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Debug for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl Debug for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Debug for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Debug for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl Debug for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Debug for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Debug for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl Debug for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Debug for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl Debug for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]},{"text":"impl<T> Debug for cptrx2<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx2"]},{"text":"impl<T> Debug for mptrx2<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx2"]},{"text":"impl<T> Debug for cptrx4<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx4"]},{"text":"impl<T> Debug for mptrx4<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx4"]},{"text":"impl<T> Debug for cptrx8<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx8"]},{"text":"impl<T> Debug for mptrx8<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/fmt/trait.LowerHex.js b/implementors/core/fmt/trait.LowerHex.js new file mode 100644 index 000000000..08192de1b --- /dev/null +++ b/implementors/core/fmt/trait.LowerHex.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl LowerHex for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl LowerHex for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl LowerHex for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl LowerHex for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl LowerHex for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl LowerHex for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl LowerHex for 
i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl LowerHex for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl LowerHex for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl LowerHex for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl LowerHex for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl LowerHex for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl LowerHex for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl LowerHex for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl LowerHex for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl LowerHex for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl LowerHex for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl LowerHex for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl LowerHex for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl LowerHex for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl LowerHex for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl LowerHex for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl LowerHex for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl LowerHex for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl LowerHex for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl LowerHex for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl LowerHex for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl LowerHex for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl LowerHex for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl LowerHex for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl LowerHex for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl LowerHex for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl LowerHex for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl LowerHex for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl LowerHex for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl LowerHex for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl LowerHex for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl LowerHex for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl LowerHex for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl LowerHex for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl LowerHex for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl LowerHex for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl LowerHex for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl LowerHex for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl LowerHex for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl LowerHex for 
usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl LowerHex for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl LowerHex for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/fmt/trait.Octal.js b/implementors/core/fmt/trait.Octal.js new file mode 100644 index 000000000..8a5a49b68 --- /dev/null +++ b/implementors/core/fmt/trait.Octal.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Octal for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Octal for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Octal for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Octal for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Octal for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Octal for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Octal for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Octal for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Octal for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Octal for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Octal for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Octal for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Octal for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Octal for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Octal for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Octal for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Octal for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Octal for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Octal for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Octal for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Octal for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Octal for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Octal for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Octal for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Octal for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Octal for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Octal for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Octal for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Octal for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Octal for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Octal for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Octal for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Octal for 
i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Octal for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Octal for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Octal for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Octal for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Octal for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Octal for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Octal for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Octal for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Octal for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Octal for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Octal for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Octal for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Octal for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Octal for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Octal for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/fmt/trait.UpperHex.js b/implementors/core/fmt/trait.UpperHex.js new file mode 100644 index 000000000..c45e796c8 --- /dev/null +++ b/implementors/core/fmt/trait.UpperHex.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl UpperHex for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl UpperHex for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl UpperHex for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl UpperHex for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl UpperHex for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl UpperHex for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl UpperHex for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl UpperHex for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl UpperHex for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl UpperHex for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl UpperHex for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl UpperHex for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl UpperHex for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl UpperHex for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl UpperHex for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl UpperHex for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl UpperHex for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl UpperHex for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl UpperHex for 
i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl UpperHex for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl UpperHex for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl UpperHex for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl UpperHex for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl UpperHex for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl UpperHex for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl UpperHex for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl UpperHex for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl UpperHex for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl UpperHex for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl UpperHex for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl UpperHex for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl UpperHex for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl UpperHex for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl UpperHex for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl UpperHex for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl UpperHex for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl UpperHex for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl UpperHex for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl UpperHex for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl UpperHex for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl UpperHex for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl UpperHex for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl UpperHex for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl UpperHex for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl UpperHex for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl UpperHex for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl UpperHex for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl UpperHex for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/hash/trait.Hash.js b/implementors/core/hash/trait.Hash.js new file mode 100644 index 000000000..193c9b790 --- /dev/null +++ b/implementors/core/hash/trait.Hash.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Hash for m8","synthetic":false,"types":["packed_simd::masks::m8"]},{"text":"impl Hash for m16","synthetic":false,"types":["packed_simd::masks::m16"]},{"text":"impl Hash for m32","synthetic":false,"types":["packed_simd::masks::m32"]},{"text":"impl Hash for m64","synthetic":false,"types":["packed_simd::masks::m64"]},{"text":"impl Hash for 
m128","synthetic":false,"types":["packed_simd::masks::m128"]},{"text":"impl Hash for msize","synthetic":false,"types":["packed_simd::masks::msize"]},{"text":"impl Hash for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Hash for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Hash for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Hash for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Hash for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Hash for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Hash for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Hash for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Hash for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Hash for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Hash for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Hash for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Hash for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Hash for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Hash for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Hash for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Hash for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Hash for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Hash for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Hash for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Hash for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Hash for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Hash for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Hash for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Hash for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Hash for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Hash for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Hash for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Hash for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Hash for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Hash for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Hash for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Hash for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Hash for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Hash for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Hash for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Hash for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Hash for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Hash for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Hash for 
u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Hash for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Hash for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Hash for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Hash for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Hash for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Hash for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Hash for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Hash for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl<T> Hash for cptrx2<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx2"]},{"text":"impl<T> Hash for mptrx2<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx2"]},{"text":"impl<T> Hash for cptrx4<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx4"]},{"text":"impl<T> Hash for mptrx4<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx4"]},{"text":"impl<T> Hash for cptrx8<T>","synthetic":false,"types":["packed_simd::vPtr::cptrx8"]},{"text":"impl<T> Hash for mptrx8<T>","synthetic":false,"types":["packed_simd::vPtr::mptrx8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/iter/traits/accum/trait.Product.js b/implementors/core/iter/traits/accum/trait.Product.js new file mode 100644 index 000000000..f078586a4 --- /dev/null +++ b/implementors/core/iter/traits/accum/trait.Product.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Product<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl<'a> Product<&'a Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Product<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl<'a> Product<&'a Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Product<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl<'a> Product<&'a Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Product<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl<'a> Product<&'a Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Product<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl<'a> Product<&'a Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Product<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl<'a> Product<&'a Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Product<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl<'a> Product<&'a Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Product<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl<'a> Product<&'a Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl 
Product<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl<'a> Product<&'a Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Product<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl<'a> Product<&'a Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Product<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl<'a> Product<&'a Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Product<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl<'a> Product<&'a Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Product<Simd<[f32; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl<'a> Product<&'a Simd<[f32; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl Product<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl<'a> Product<&'a Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Product<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl<'a> Product<&'a Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Product<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl<'a> Product<&'a Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Product<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl<'a> Product<&'a Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Product<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl<'a> Product<&'a Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Product<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl<'a> Product<&'a Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Product<Simd<[f32; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl<'a> Product<&'a Simd<[f32; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl Product<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl<'a> Product<&'a Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Product<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl<'a> Product<&'a Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Product<Simd<[f64; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl<'a> Product<&'a Simd<[f64; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl Product<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl<'a> Product<&'a Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Product<Simd<[u128; 1]>> for 
u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl<'a> Product<&'a Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Product<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl<'a> Product<&'a Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Product<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl<'a> Product<&'a Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Product<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl<'a> Product<&'a Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Product<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl<'a> Product<&'a Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Product<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl<'a> Product<&'a Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Product<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl<'a> Product<&'a Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Product<Simd<[f32; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl<'a> Product<&'a Simd<[f32; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl Product<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl<'a> Product<&'a Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Product<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl<'a> Product<&'a Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Product<Simd<[f64; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl<'a> Product<&'a Simd<[f64; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl Product<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl<'a> Product<&'a Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Product<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl<'a> Product<&'a Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Product<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl<'a> Product<&'a Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Product<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl<'a> Product<&'a Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Product<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl<'a> Product<&'a Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Product<Simd<[u16; 32]>> for 
u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl<'a> Product<&'a Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Product<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl<'a> Product<&'a Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Product<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl<'a> Product<&'a Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Product<Simd<[f32; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl<'a> Product<&'a Simd<[f32; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl Product<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl<'a> Product<&'a Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Product<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl<'a> Product<&'a Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Product<Simd<[f64; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl<'a> Product<&'a Simd<[f64; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl Product<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl<'a> Product<&'a Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Product<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl<'a> Product<&'a Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Product<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl<'a> Product<&'a Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Product<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl<'a> Product<&'a Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Product<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl<'a> Product<&'a Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Product<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl<'a> Product<&'a Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Product<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl<'a> Product<&'a Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Product<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl<'a> Product<&'a Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git 
a/implementors/core/iter/traits/accum/trait.Sum.js b/implementors/core/iter/traits/accum/trait.Sum.js new file mode 100644 index 000000000..1c55637e4 --- /dev/null +++ b/implementors/core/iter/traits/accum/trait.Sum.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Sum<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl<'a> Sum<&'a Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Sum<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl<'a> Sum<&'a Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Sum<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl<'a> Sum<&'a Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Sum<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl<'a> Sum<&'a Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Sum<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl<'a> Sum<&'a Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Sum<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl<'a> Sum<&'a Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Sum<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl<'a> Sum<&'a Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Sum<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl<'a> Sum<&'a Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Sum<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl<'a> Sum<&'a Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Sum<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl<'a> Sum<&'a Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Sum<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl<'a> Sum<&'a Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Sum<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl<'a> Sum<&'a Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Sum<Simd<[f32; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl<'a> Sum<&'a Simd<[f32; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl Sum<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl<'a> Sum<&'a Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Sum<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl<'a> Sum<&'a Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Sum<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl<'a> Sum<&'a Simd<[i16; 8]>> for 
i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Sum<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl<'a> Sum<&'a Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Sum<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl<'a> Sum<&'a Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Sum<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl<'a> Sum<&'a Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Sum<Simd<[f32; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl<'a> Sum<&'a Simd<[f32; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl Sum<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl<'a> Sum<&'a Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Sum<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl<'a> Sum<&'a Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Sum<Simd<[f64; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl<'a> Sum<&'a Simd<[f64; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl Sum<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl<'a> Sum<&'a Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Sum<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl<'a> Sum<&'a Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Sum<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl<'a> Sum<&'a Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Sum<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl<'a> Sum<&'a Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Sum<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl<'a> Sum<&'a Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Sum<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl<'a> Sum<&'a Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Sum<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl<'a> Sum<&'a Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Sum<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl<'a> Sum<&'a Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Sum<Simd<[f32; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl<'a> Sum<&'a Simd<[f32; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl Sum<Simd<[i64; 4]>> for 
i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl<'a> Sum<&'a Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Sum<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl<'a> Sum<&'a Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Sum<Simd<[f64; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl<'a> Sum<&'a Simd<[f64; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl Sum<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl<'a> Sum<&'a Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Sum<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl<'a> Sum<&'a Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Sum<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl<'a> Sum<&'a Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Sum<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl<'a> Sum<&'a Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Sum<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl<'a> Sum<&'a Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Sum<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl<'a> Sum<&'a Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Sum<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl<'a> Sum<&'a Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Sum<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl<'a> Sum<&'a Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Sum<Simd<[f32; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl<'a> Sum<&'a Simd<[f32; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl Sum<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl<'a> Sum<&'a Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Sum<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl<'a> Sum<&'a Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Sum<Simd<[f64; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl<'a> Sum<&'a Simd<[f64; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl Sum<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl<'a> Sum<&'a Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Sum<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl<'a> Sum<&'a Simd<[u128; 4]>> for 
u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Sum<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl<'a> Sum<&'a Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Sum<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl<'a> Sum<&'a Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Sum<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl<'a> Sum<&'a Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Sum<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl<'a> Sum<&'a Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Sum<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl<'a> Sum<&'a Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Sum<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl<'a> Sum<&'a Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/marker/trait.Copy.js b/implementors/core/marker/trait.Copy.js new file mode 100644 index 000000000..690cf875e --- /dev/null +++ b/implementors/core/marker/trait.Copy.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl<A: Copy + SimdArray> Copy for Simd<A>","synthetic":false,"types":["packed_simd::Simd"]},{"text":"impl<T: Copy> Copy for LexicographicallyOrdered<T>","synthetic":false,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Copy for m8","synthetic":false,"types":["packed_simd::masks::m8"]},{"text":"impl Copy for m16","synthetic":false,"types":["packed_simd::masks::m16"]},{"text":"impl Copy for m32","synthetic":false,"types":["packed_simd::masks::m32"]},{"text":"impl Copy for m64","synthetic":false,"types":["packed_simd::masks::m64"]},{"text":"impl Copy for m128","synthetic":false,"types":["packed_simd::masks::m128"]},{"text":"impl Copy for msize","synthetic":false,"types":["packed_simd::masks::msize"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/marker/trait.Freeze.js b/implementors/core/marker/trait.Freeze.js new file mode 100644 index 000000000..8962707c0 --- /dev/null +++ b/implementors/core/marker/trait.Freeze.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl<A> Freeze for Simd<A> where
    <A as SimdArray>::Tuple: Freeze, 
","synthetic":true,"types":["packed_simd::Simd"]},{"text":"impl<T> Freeze for LexicographicallyOrdered<T> where
    T: Freeze, 
","synthetic":true,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Freeze for m8","synthetic":true,"types":["packed_simd::masks::m8"]},{"text":"impl Freeze for m16","synthetic":true,"types":["packed_simd::masks::m16"]},{"text":"impl Freeze for m32","synthetic":true,"types":["packed_simd::masks::m32"]},{"text":"impl Freeze for m64","synthetic":true,"types":["packed_simd::masks::m64"]},{"text":"impl Freeze for m128","synthetic":true,"types":["packed_simd::masks::m128"]},{"text":"impl Freeze for msize","synthetic":true,"types":["packed_simd::masks::msize"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/marker/trait.Send.js b/implementors/core/marker/trait.Send.js new file mode 100644 index 000000000..90fd192c2 --- /dev/null +++ b/implementors/core/marker/trait.Send.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl<A> Send for Simd<A> where
    <A as SimdArray>::Tuple: Send
","synthetic":true,"types":["packed_simd::Simd"]},{"text":"impl<T> Send for LexicographicallyOrdered<T> where
    T: Send
","synthetic":true,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Send for m8","synthetic":true,"types":["packed_simd::masks::m8"]},{"text":"impl Send for m16","synthetic":true,"types":["packed_simd::masks::m16"]},{"text":"impl Send for m32","synthetic":true,"types":["packed_simd::masks::m32"]},{"text":"impl Send for m64","synthetic":true,"types":["packed_simd::masks::m64"]},{"text":"impl Send for m128","synthetic":true,"types":["packed_simd::masks::m128"]},{"text":"impl Send for msize","synthetic":true,"types":["packed_simd::masks::msize"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/marker/trait.Sync.js b/implementors/core/marker/trait.Sync.js new file mode 100644 index 000000000..fa20ea6cc --- /dev/null +++ b/implementors/core/marker/trait.Sync.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl<A> Sync for Simd<A> where
    <A as SimdArray>::Tuple: Sync
","synthetic":true,"types":["packed_simd::Simd"]},{"text":"impl<T> Sync for LexicographicallyOrdered<T> where
    T: Sync
","synthetic":true,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Sync for m8","synthetic":true,"types":["packed_simd::masks::m8"]},{"text":"impl Sync for m16","synthetic":true,"types":["packed_simd::masks::m16"]},{"text":"impl Sync for m32","synthetic":true,"types":["packed_simd::masks::m32"]},{"text":"impl Sync for m64","synthetic":true,"types":["packed_simd::masks::m64"]},{"text":"impl Sync for m128","synthetic":true,"types":["packed_simd::masks::m128"]},{"text":"impl Sync for msize","synthetic":true,"types":["packed_simd::masks::msize"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/marker/trait.Unpin.js b/implementors/core/marker/trait.Unpin.js new file mode 100644 index 000000000..b6a98cbdf --- /dev/null +++ b/implementors/core/marker/trait.Unpin.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl<A> Unpin for Simd<A> where
    <A as SimdArray>::Tuple: Unpin
","synthetic":true,"types":["packed_simd::Simd"]},{"text":"impl<T> Unpin for LexicographicallyOrdered<T> where
    T: Unpin
","synthetic":true,"types":["packed_simd::LexicographicallyOrdered"]},{"text":"impl Unpin for m8","synthetic":true,"types":["packed_simd::masks::m8"]},{"text":"impl Unpin for m16","synthetic":true,"types":["packed_simd::masks::m16"]},{"text":"impl Unpin for m32","synthetic":true,"types":["packed_simd::masks::m32"]},{"text":"impl Unpin for m64","synthetic":true,"types":["packed_simd::masks::m64"]},{"text":"impl Unpin for m128","synthetic":true,"types":["packed_simd::masks::m128"]},{"text":"impl Unpin for msize","synthetic":true,"types":["packed_simd::masks::msize"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/arith/trait.Add.js b/implementors/core/ops/arith/trait.Add.js new file mode 100644 index 000000000..2f44f6f8c --- /dev/null +++ b/implementors/core/ops/arith/trait.Add.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Add<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Add<i8> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Add<Simd<[i8; 2]>> for i8","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Add<u8> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Add<Simd<[u8; 2]>> for u8","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Add<i8> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Add<Simd<[i8; 4]>> for i8","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Add<u8> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Add<Simd<[u8; 4]>> for u8","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Add<i16> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Add<Simd<[i16; 2]>> for i16","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Add<u16> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Add<Simd<[u16; 2]>> for u16","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Add<i8> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Add<Simd<[i8; 8]>> for i8","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Add<u8> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Add<Simd<[u8; 8]>> for u8","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Add<i16> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Add<Simd<[i16; 4]>> for i16","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Add<u16> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl 
Add<Simd<[u16; 4]>> for u16","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Add<i32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Add<Simd<[i32; 2]>> for i32","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Add<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Add<Simd<[u32; 2]>> for u32","synthetic":false,"types":[]},{"text":"impl Add<Simd<[f32; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl Add<f32> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl Add<Simd<[f32; 2]>> for f32","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Add<i8> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Add<Simd<[i8; 16]>> for i8","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Add<u8> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Add<Simd<[u8; 16]>> for u8","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Add<i16> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Add<Simd<[i16; 8]>> for i16","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Add<u16> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Add<Simd<[u16; 8]>> for u16","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Add<i32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Add<Simd<[i32; 4]>> for i32","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Add<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Add<Simd<[u32; 4]>> for u32","synthetic":false,"types":[]},{"text":"impl Add<Simd<[f32; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl Add<f32> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl Add<Simd<[f32; 4]>> for f32","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Add<i64> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Add<Simd<[i64; 2]>> for i64","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Add<u64> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Add<Simd<[u64; 2]>> for u64","synthetic":false,"types":[]},{"text":"impl Add<Simd<[f64; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl Add<f64> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl Add<Simd<[f64; 2]>> for f64","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i128; 1]>> for 
i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Add<i128> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Add<Simd<[i128; 1]>> for i128","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Add<u128> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Add<Simd<[u128; 1]>> for u128","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Add<i8> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Add<Simd<[i8; 32]>> for i8","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Add<u8> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Add<Simd<[u8; 32]>> for u8","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Add<i16> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Add<Simd<[i16; 16]>> for i16","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Add<u16> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Add<Simd<[u16; 16]>> for u16","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Add<i32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Add<Simd<[i32; 8]>> for i32","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Add<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Add<Simd<[u32; 8]>> for u32","synthetic":false,"types":[]},{"text":"impl Add<Simd<[f32; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl Add<f32> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl Add<Simd<[f32; 8]>> for f32","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Add<i64> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Add<Simd<[i64; 4]>> for i64","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Add<u64> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Add<Simd<[u64; 4]>> for u64","synthetic":false,"types":[]},{"text":"impl Add<Simd<[f64; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl Add<f64> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl Add<Simd<[f64; 4]>> for f64","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Add<i128> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Add<Simd<[i128; 2]>> for i128","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Add<u128> 
for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Add<Simd<[u128; 2]>> for u128","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Add<i8> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Add<Simd<[i8; 64]>> for i8","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Add<u8> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Add<Simd<[u8; 64]>> for u8","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Add<i16> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Add<Simd<[i16; 32]>> for i16","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Add<u16> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Add<Simd<[u16; 32]>> for u16","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Add<i32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Add<Simd<[i32; 16]>> for i32","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Add<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Add<Simd<[u32; 16]>> for u32","synthetic":false,"types":[]},{"text":"impl Add<Simd<[f32; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl Add<f32> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl Add<Simd<[f32; 16]>> for f32","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Add<i64> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Add<Simd<[i64; 8]>> for i64","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Add<u64> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Add<Simd<[u64; 8]>> for u64","synthetic":false,"types":[]},{"text":"impl Add<Simd<[f64; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl Add<f64> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl Add<Simd<[f64; 8]>> for f64","synthetic":false,"types":[]},{"text":"impl Add<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Add<i128> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Add<Simd<[i128; 4]>> for i128","synthetic":false,"types":[]},{"text":"impl Add<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Add<u128> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Add<Simd<[u128; 4]>> for u128","synthetic":false,"types":[]},{"text":"impl Add<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Add<isize> for 
isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Add<Simd<[isize; 2]>> for isize","synthetic":false,"types":[]},{"text":"impl Add<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Add<usize> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Add<Simd<[usize; 2]>> for usize","synthetic":false,"types":[]},{"text":"impl Add<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Add<isize> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Add<Simd<[isize; 4]>> for isize","synthetic":false,"types":[]},{"text":"impl Add<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Add<usize> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Add<Simd<[usize; 4]>> for usize","synthetic":false,"types":[]},{"text":"impl Add<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Add<isize> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Add<Simd<[isize; 8]>> for isize","synthetic":false,"types":[]},{"text":"impl Add<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl Add<usize> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl Add<Simd<[usize; 8]>> for usize","synthetic":false,"types":[]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/arith/trait.AddAssign.js b/implementors/core/ops/arith/trait.AddAssign.js new file mode 100644 index 000000000..dee23654c --- /dev/null +++ b/implementors/core/ops/arith/trait.AddAssign.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl AddAssign<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl AddAssign<i8> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl AddAssign<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl AddAssign<u8> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl AddAssign<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl AddAssign<i8> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl AddAssign<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl AddAssign<u8> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl AddAssign<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl AddAssign<i16> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl AddAssign<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl AddAssign<u16> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl AddAssign<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl AddAssign<i8> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl AddAssign<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl AddAssign<u8> for 
u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl AddAssign<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl AddAssign<i16> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl AddAssign<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl AddAssign<u16> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl AddAssign<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl AddAssign<i32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl AddAssign<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl AddAssign<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl AddAssign<Simd<[f32; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl AddAssign<f32> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl AddAssign<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl AddAssign<i8> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl AddAssign<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl AddAssign<u8> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl AddAssign<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl AddAssign<i16> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl AddAssign<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl AddAssign<u16> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl AddAssign<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl AddAssign<i32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl AddAssign<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl AddAssign<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl AddAssign<Simd<[f32; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl AddAssign<f32> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl AddAssign<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl AddAssign<i64> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl AddAssign<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl AddAssign<u64> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl AddAssign<Simd<[f64; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl AddAssign<f64> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl AddAssign<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl AddAssign<i128> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl AddAssign<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl AddAssign<u128> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl AddAssign<Simd<[i8; 32]>> for 
i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl AddAssign<i8> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl AddAssign<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl AddAssign<u8> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl AddAssign<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl AddAssign<i16> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl AddAssign<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl AddAssign<u16> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl AddAssign<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl AddAssign<i32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl AddAssign<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl AddAssign<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl AddAssign<Simd<[f32; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl AddAssign<f32> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl AddAssign<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl AddAssign<i64> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl AddAssign<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl AddAssign<u64> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl AddAssign<Simd<[f64; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl AddAssign<f64> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl AddAssign<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl AddAssign<i128> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl AddAssign<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl AddAssign<u128> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl AddAssign<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl AddAssign<i8> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl AddAssign<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl AddAssign<u8> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl AddAssign<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl AddAssign<i16> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl AddAssign<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl AddAssign<u16> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl AddAssign<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl AddAssign<i32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl AddAssign<Simd<[u32; 16]>> for 
u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl AddAssign<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl AddAssign<Simd<[f32; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl AddAssign<f32> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl AddAssign<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl AddAssign<i64> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl AddAssign<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl AddAssign<u64> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl AddAssign<Simd<[f64; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl AddAssign<f64> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl AddAssign<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl AddAssign<i128> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl AddAssign<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl AddAssign<u128> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl AddAssign<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl AddAssign<isize> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl AddAssign<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl AddAssign<usize> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl AddAssign<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl AddAssign<isize> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl AddAssign<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl AddAssign<usize> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl AddAssign<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl AddAssign<isize> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl AddAssign<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl AddAssign<usize> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/arith/trait.Div.js b/implementors/core/ops/arith/trait.Div.js new file mode 100644 index 000000000..bc9ce580d --- /dev/null +++ b/implementors/core/ops/arith/trait.Div.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Div<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Div<i8> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Div<Simd<[i8; 2]>> for i8","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Div<u8> for 
u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Div<Simd<[u8; 2]>> for u8","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Div<i8> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Div<Simd<[i8; 4]>> for i8","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Div<u8> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Div<Simd<[u8; 4]>> for u8","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Div<i16> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Div<Simd<[i16; 2]>> for i16","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Div<u16> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Div<Simd<[u16; 2]>> for u16","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Div<i8> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Div<Simd<[i8; 8]>> for i8","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Div<u8> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Div<Simd<[u8; 8]>> for u8","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Div<i16> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Div<Simd<[i16; 4]>> for i16","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Div<u16> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Div<Simd<[u16; 4]>> for u16","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Div<i32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Div<Simd<[i32; 2]>> for i32","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Div<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Div<Simd<[u32; 2]>> for u32","synthetic":false,"types":[]},{"text":"impl Div<Simd<[f32; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl Div<f32> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl Div<Simd<[f32; 2]>> for f32","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Div<i8> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Div<Simd<[i8; 16]>> for i8","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Div<u8> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Div<Simd<[u8; 16]>> for u8","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i16; 8]>> for 
i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Div<i16> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Div<Simd<[i16; 8]>> for i16","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Div<u16> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Div<Simd<[u16; 8]>> for u16","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Div<i32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Div<Simd<[i32; 4]>> for i32","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Div<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Div<Simd<[u32; 4]>> for u32","synthetic":false,"types":[]},{"text":"impl Div<Simd<[f32; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl Div<f32> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl Div<Simd<[f32; 4]>> for f32","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Div<i64> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Div<Simd<[i64; 2]>> for i64","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Div<u64> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Div<Simd<[u64; 2]>> for u64","synthetic":false,"types":[]},{"text":"impl Div<Simd<[f64; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl Div<f64> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl Div<Simd<[f64; 2]>> for f64","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Div<i128> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Div<Simd<[i128; 1]>> for i128","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Div<u128> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Div<Simd<[u128; 1]>> for u128","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Div<i8> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Div<Simd<[i8; 32]>> for i8","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Div<u8> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Div<Simd<[u8; 32]>> for u8","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Div<i16> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Div<Simd<[i16; 16]>> for i16","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Div<u16> for 
u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Div<Simd<[u16; 16]>> for u16","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Div<i32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Div<Simd<[i32; 8]>> for i32","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Div<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Div<Simd<[u32; 8]>> for u32","synthetic":false,"types":[]},{"text":"impl Div<Simd<[f32; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl Div<f32> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl Div<Simd<[f32; 8]>> for f32","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Div<i64> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Div<Simd<[i64; 4]>> for i64","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Div<u64> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Div<Simd<[u64; 4]>> for u64","synthetic":false,"types":[]},{"text":"impl Div<Simd<[f64; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl Div<f64> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl Div<Simd<[f64; 4]>> for f64","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Div<i128> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Div<Simd<[i128; 2]>> for i128","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Div<u128> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Div<Simd<[u128; 2]>> for u128","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Div<i8> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Div<Simd<[i8; 64]>> for i8","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Div<u8> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Div<Simd<[u8; 64]>> for u8","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Div<i16> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Div<Simd<[i16; 32]>> for i16","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Div<u16> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Div<Simd<[u16; 32]>> for u16","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Div<i32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl 
Div<Simd<[i32; 16]>> for i32","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Div<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Div<Simd<[u32; 16]>> for u32","synthetic":false,"types":[]},{"text":"impl Div<Simd<[f32; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl Div<f32> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl Div<Simd<[f32; 16]>> for f32","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Div<i64> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Div<Simd<[i64; 8]>> for i64","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Div<u64> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Div<Simd<[u64; 8]>> for u64","synthetic":false,"types":[]},{"text":"impl Div<Simd<[f64; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl Div<f64> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl Div<Simd<[f64; 8]>> for f64","synthetic":false,"types":[]},{"text":"impl Div<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Div<i128> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Div<Simd<[i128; 4]>> for i128","synthetic":false,"types":[]},{"text":"impl Div<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Div<u128> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Div<Simd<[u128; 4]>> for u128","synthetic":false,"types":[]},{"text":"impl Div<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Div<isize> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Div<Simd<[isize; 2]>> for isize","synthetic":false,"types":[]},{"text":"impl Div<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Div<usize> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Div<Simd<[usize; 2]>> for usize","synthetic":false,"types":[]},{"text":"impl Div<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Div<isize> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Div<Simd<[isize; 4]>> for isize","synthetic":false,"types":[]},{"text":"impl Div<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Div<usize> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Div<Simd<[usize; 4]>> for usize","synthetic":false,"types":[]},{"text":"impl Div<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Div<isize> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Div<Simd<[isize; 8]>> for isize","synthetic":false,"types":[]},{"text":"impl Div<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl Div<usize> for 
usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl Div<Simd<[usize; 8]>> for usize","synthetic":false,"types":[]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/arith/trait.DivAssign.js b/implementors/core/ops/arith/trait.DivAssign.js new file mode 100644 index 000000000..de835a8e4 --- /dev/null +++ b/implementors/core/ops/arith/trait.DivAssign.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl DivAssign<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl DivAssign<i8> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl DivAssign<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl DivAssign<u8> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl DivAssign<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl DivAssign<i8> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl DivAssign<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl DivAssign<u8> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl DivAssign<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl DivAssign<i16> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl DivAssign<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl DivAssign<u16> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl DivAssign<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl DivAssign<i8> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl DivAssign<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl DivAssign<u8> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl DivAssign<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl DivAssign<i16> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl DivAssign<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl DivAssign<u16> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl DivAssign<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl DivAssign<i32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl DivAssign<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl DivAssign<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl DivAssign<Simd<[f32; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl DivAssign<f32> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl DivAssign<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl DivAssign<i8> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl DivAssign<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl DivAssign<u8> for 
u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl DivAssign<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl DivAssign<i16> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl DivAssign<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl DivAssign<u16> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl DivAssign<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl DivAssign<i32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl DivAssign<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl DivAssign<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl DivAssign<Simd<[f32; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl DivAssign<f32> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl DivAssign<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl DivAssign<i64> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl DivAssign<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl DivAssign<u64> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl DivAssign<Simd<[f64; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl DivAssign<f64> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl DivAssign<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl DivAssign<i128> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl DivAssign<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl DivAssign<u128> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl DivAssign<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl DivAssign<i8> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl DivAssign<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl DivAssign<u8> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl DivAssign<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl DivAssign<i16> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl DivAssign<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl DivAssign<u16> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl DivAssign<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl DivAssign<i32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl DivAssign<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl DivAssign<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl DivAssign<Simd<[f32; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl DivAssign<f32> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl 
DivAssign<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl DivAssign<i64> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl DivAssign<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl DivAssign<u64> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl DivAssign<Simd<[f64; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl DivAssign<f64> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl DivAssign<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl DivAssign<i128> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl DivAssign<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl DivAssign<u128> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl DivAssign<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl DivAssign<i8> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl DivAssign<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl DivAssign<u8> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl DivAssign<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl DivAssign<i16> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl DivAssign<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl DivAssign<u16> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl DivAssign<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl DivAssign<i32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl DivAssign<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl DivAssign<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl DivAssign<Simd<[f32; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl DivAssign<f32> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl DivAssign<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl DivAssign<i64> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl DivAssign<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl DivAssign<u64> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl DivAssign<Simd<[f64; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl DivAssign<f64> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl DivAssign<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl DivAssign<i128> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl DivAssign<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl DivAssign<u128> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl DivAssign<Simd<[isize; 2]>> for 
isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl DivAssign<isize> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl DivAssign<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl DivAssign<usize> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl DivAssign<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl DivAssign<isize> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl DivAssign<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl DivAssign<usize> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl DivAssign<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl DivAssign<isize> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl DivAssign<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl DivAssign<usize> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/arith/trait.Mul.js b/implementors/core/ops/arith/trait.Mul.js new file mode 100644 index 000000000..b7285e2c8 --- /dev/null +++ b/implementors/core/ops/arith/trait.Mul.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Mul<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Mul<i8> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Mul<Simd<[i8; 2]>> for i8","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Mul<u8> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Mul<Simd<[u8; 2]>> for u8","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Mul<i8> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Mul<Simd<[i8; 4]>> for i8","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Mul<u8> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Mul<Simd<[u8; 4]>> for u8","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Mul<i16> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Mul<Simd<[i16; 2]>> for i16","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Mul<u16> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Mul<Simd<[u16; 2]>> for u16","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Mul<i8> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Mul<Simd<[i8; 8]>> for i8","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u8; 8]>> for 
u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Mul<u8> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Mul<Simd<[u8; 8]>> for u8","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Mul<i16> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Mul<Simd<[i16; 4]>> for i16","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Mul<u16> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Mul<Simd<[u16; 4]>> for u16","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Mul<i32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Mul<Simd<[i32; 2]>> for i32","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Mul<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Mul<Simd<[u32; 2]>> for u32","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[f32; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl Mul<f32> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl Mul<Simd<[f32; 2]>> for f32","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Mul<i8> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Mul<Simd<[i8; 16]>> for i8","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Mul<u8> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Mul<Simd<[u8; 16]>> for u8","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Mul<i16> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Mul<Simd<[i16; 8]>> for i16","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Mul<u16> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Mul<Simd<[u16; 8]>> for u16","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Mul<i32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Mul<Simd<[i32; 4]>> for i32","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Mul<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Mul<Simd<[u32; 4]>> for u32","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[f32; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl Mul<f32> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl Mul<Simd<[f32; 4]>> for f32","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Mul<i64> for 
i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Mul<Simd<[i64; 2]>> for i64","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Mul<u64> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Mul<Simd<[u64; 2]>> for u64","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[f64; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl Mul<f64> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl Mul<Simd<[f64; 2]>> for f64","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Mul<i128> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Mul<Simd<[i128; 1]>> for i128","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Mul<u128> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Mul<Simd<[u128; 1]>> for u128","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Mul<i8> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Mul<Simd<[i8; 32]>> for i8","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Mul<u8> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Mul<Simd<[u8; 32]>> for u8","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Mul<i16> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Mul<Simd<[i16; 16]>> for i16","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Mul<u16> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Mul<Simd<[u16; 16]>> for u16","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Mul<i32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Mul<Simd<[i32; 8]>> for i32","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Mul<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Mul<Simd<[u32; 8]>> for u32","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[f32; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl Mul<f32> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl Mul<Simd<[f32; 8]>> for f32","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Mul<i64> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Mul<Simd<[i64; 4]>> for i64","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Mul<u64> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Mul<Simd<[u64; 4]>> 
for u64","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[f64; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl Mul<f64> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl Mul<Simd<[f64; 4]>> for f64","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Mul<i128> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Mul<Simd<[i128; 2]>> for i128","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Mul<u128> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Mul<Simd<[u128; 2]>> for u128","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Mul<i8> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Mul<Simd<[i8; 64]>> for i8","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Mul<u8> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Mul<Simd<[u8; 64]>> for u8","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Mul<i16> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Mul<Simd<[i16; 32]>> for i16","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Mul<u16> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Mul<Simd<[u16; 32]>> for u16","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Mul<i32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Mul<Simd<[i32; 16]>> for i32","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Mul<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Mul<Simd<[u32; 16]>> for u32","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[f32; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl Mul<f32> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl Mul<Simd<[f32; 16]>> for f32","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Mul<i64> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Mul<Simd<[i64; 8]>> for i64","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Mul<u64> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Mul<Simd<[u64; 8]>> for u64","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[f64; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl Mul<f64> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl Mul<Simd<[f64; 8]>> for f64","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[i128; 4]>> for 
i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Mul<i128> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Mul<Simd<[i128; 4]>> for i128","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Mul<u128> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Mul<Simd<[u128; 4]>> for u128","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Mul<isize> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Mul<Simd<[isize; 2]>> for isize","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Mul<usize> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Mul<Simd<[usize; 2]>> for usize","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Mul<isize> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Mul<Simd<[isize; 4]>> for isize","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Mul<usize> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Mul<Simd<[usize; 4]>> for usize","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Mul<isize> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Mul<Simd<[isize; 8]>> for isize","synthetic":false,"types":[]},{"text":"impl Mul<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl Mul<usize> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl Mul<Simd<[usize; 8]>> for usize","synthetic":false,"types":[]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/arith/trait.MulAssign.js b/implementors/core/ops/arith/trait.MulAssign.js new file mode 100644 index 000000000..ba15b02cc --- /dev/null +++ b/implementors/core/ops/arith/trait.MulAssign.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl MulAssign<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl MulAssign<i8> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl MulAssign<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl MulAssign<u8> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl MulAssign<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl MulAssign<i8> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl MulAssign<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl MulAssign<u8> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl MulAssign<Simd<[i16; 2]>> for 
i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl MulAssign<i16> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl MulAssign<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl MulAssign<u16> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl MulAssign<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl MulAssign<i8> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl MulAssign<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl MulAssign<u8> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl MulAssign<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl MulAssign<i16> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl MulAssign<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl MulAssign<u16> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl MulAssign<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl MulAssign<i32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl MulAssign<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl MulAssign<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl MulAssign<Simd<[f32; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl MulAssign<f32> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl MulAssign<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl MulAssign<i8> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl MulAssign<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl MulAssign<u8> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl MulAssign<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl MulAssign<i16> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl MulAssign<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl MulAssign<u16> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl MulAssign<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl MulAssign<i32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl MulAssign<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl MulAssign<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl MulAssign<Simd<[f32; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl MulAssign<f32> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl MulAssign<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl MulAssign<i64> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl MulAssign<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl MulAssign<u64> for 
u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl MulAssign<Simd<[f64; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl MulAssign<f64> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl MulAssign<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl MulAssign<i128> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl MulAssign<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl MulAssign<u128> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl MulAssign<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl MulAssign<i8> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl MulAssign<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl MulAssign<u8> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl MulAssign<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl MulAssign<i16> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl MulAssign<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl MulAssign<u16> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl MulAssign<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl MulAssign<i32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl MulAssign<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl MulAssign<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl MulAssign<Simd<[f32; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl MulAssign<f32> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl MulAssign<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl MulAssign<i64> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl MulAssign<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl MulAssign<u64> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl MulAssign<Simd<[f64; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl MulAssign<f64> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl MulAssign<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl MulAssign<i128> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl MulAssign<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl MulAssign<u128> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl MulAssign<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl MulAssign<i8> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl MulAssign<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl MulAssign<u8> for 
u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl MulAssign<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl MulAssign<i16> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl MulAssign<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl MulAssign<u16> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl MulAssign<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl MulAssign<i32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl MulAssign<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl MulAssign<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl MulAssign<Simd<[f32; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl MulAssign<f32> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl MulAssign<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl MulAssign<i64> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl MulAssign<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl MulAssign<u64> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl MulAssign<Simd<[f64; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl MulAssign<f64> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl MulAssign<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl MulAssign<i128> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl MulAssign<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl MulAssign<u128> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl MulAssign<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl MulAssign<isize> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl MulAssign<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl MulAssign<usize> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl MulAssign<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl MulAssign<isize> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl MulAssign<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl MulAssign<usize> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl MulAssign<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl MulAssign<isize> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl MulAssign<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl MulAssign<usize> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = 
implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/arith/trait.Neg.js b/implementors/core/ops/arith/trait.Neg.js new file mode 100644 index 000000000..666f43eed --- /dev/null +++ b/implementors/core/ops/arith/trait.Neg.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Neg for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Neg for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Neg for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Neg for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Neg for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Neg for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Neg for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl Neg for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Neg for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Neg for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Neg for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl Neg for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Neg for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl Neg for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Neg for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Neg for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Neg for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Neg for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl Neg for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Neg for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl Neg for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Neg for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Neg for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Neg for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Neg for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl Neg for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Neg for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl Neg for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Neg for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Neg for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Neg for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/arith/trait.Rem.js b/implementors/core/ops/arith/trait.Rem.js new file mode 100644 index 000000000..4d903342f --- /dev/null +++ b/implementors/core/ops/arith/trait.Rem.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Rem<Simd<[i8; 2]>> for 
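The trait.MulAssign.js and trait.Neg.js indexes both close here with the same handoff every one of these generated files uses: if rustdoc's main script has already defined window.register_implementors, the implementor list is registered immediately; otherwise it is parked on window.pending_implementors for the main script to collect later. Note that Neg is listed only for signed-integer and floating-point vectors; the unsigned types carry no entries. An illustrative sketch (assuming packed_simd as a dependency):

    use packed_simd::{f64x2, i32x4};

    fn main() {
        let v = i32x4::new(1, -2, 3, -4);
        assert_eq!(-v, i32x4::new(-1, 2, -3, 4));                   // Neg: lane-wise negation
        assert_eq!(-f64x2::new(0.5, -1.5), f64x2::new(-0.5, 1.5)); // also on float vectors
    }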
i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Rem<i8> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Rem<Simd<[i8; 2]>> for i8","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Rem<u8> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Rem<Simd<[u8; 2]>> for u8","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Rem<i8> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Rem<Simd<[i8; 4]>> for i8","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Rem<u8> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Rem<Simd<[u8; 4]>> for u8","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Rem<i16> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Rem<Simd<[i16; 2]>> for i16","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Rem<u16> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Rem<Simd<[u16; 2]>> for u16","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Rem<i8> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Rem<Simd<[i8; 8]>> for i8","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Rem<u8> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Rem<Simd<[u8; 8]>> for u8","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Rem<i16> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Rem<Simd<[i16; 4]>> for i16","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Rem<u16> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Rem<Simd<[u16; 4]>> for u16","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Rem<i32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Rem<Simd<[i32; 2]>> for i32","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Rem<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Rem<Simd<[u32; 2]>> for u32","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[f32; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl Rem<f32> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl Rem<Simd<[f32; 2]>> for f32","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Rem<i8> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Rem<Simd<[i8; 16]>> for 
i8","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Rem<u8> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Rem<Simd<[u8; 16]>> for u8","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Rem<i16> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Rem<Simd<[i16; 8]>> for i16","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Rem<u16> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Rem<Simd<[u16; 8]>> for u16","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Rem<i32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Rem<Simd<[i32; 4]>> for i32","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Rem<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Rem<Simd<[u32; 4]>> for u32","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[f32; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl Rem<f32> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl Rem<Simd<[f32; 4]>> for f32","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Rem<i64> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Rem<Simd<[i64; 2]>> for i64","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Rem<u64> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Rem<Simd<[u64; 2]>> for u64","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[f64; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl Rem<f64> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl Rem<Simd<[f64; 2]>> for f64","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Rem<i128> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Rem<Simd<[i128; 1]>> for i128","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Rem<u128> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Rem<Simd<[u128; 1]>> for u128","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Rem<i8> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Rem<Simd<[i8; 32]>> for i8","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Rem<u8> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Rem<Simd<[u8; 32]>> for u8","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i16; 16]>> for 
i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Rem<i16> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Rem<Simd<[i16; 16]>> for i16","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Rem<u16> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Rem<Simd<[u16; 16]>> for u16","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Rem<i32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Rem<Simd<[i32; 8]>> for i32","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Rem<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Rem<Simd<[u32; 8]>> for u32","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[f32; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl Rem<f32> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl Rem<Simd<[f32; 8]>> for f32","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Rem<i64> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Rem<Simd<[i64; 4]>> for i64","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Rem<u64> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Rem<Simd<[u64; 4]>> for u64","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[f64; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl Rem<f64> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl Rem<Simd<[f64; 4]>> for f64","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Rem<i128> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Rem<Simd<[i128; 2]>> for i128","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Rem<u128> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Rem<Simd<[u128; 2]>> for u128","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Rem<i8> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Rem<Simd<[i8; 64]>> for i8","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Rem<u8> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Rem<Simd<[u8; 64]>> for u8","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Rem<i16> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Rem<Simd<[i16; 32]>> for i16","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Rem<u16> for 
u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Rem<Simd<[u16; 32]>> for u16","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Rem<i32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Rem<Simd<[i32; 16]>> for i32","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Rem<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Rem<Simd<[u32; 16]>> for u32","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[f32; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl Rem<f32> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl Rem<Simd<[f32; 16]>> for f32","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Rem<i64> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Rem<Simd<[i64; 8]>> for i64","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Rem<u64> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Rem<Simd<[u64; 8]>> for u64","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[f64; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl Rem<f64> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl Rem<Simd<[f64; 8]>> for f64","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Rem<i128> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Rem<Simd<[i128; 4]>> for i128","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Rem<u128> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Rem<Simd<[u128; 4]>> for u128","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Rem<isize> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Rem<Simd<[isize; 2]>> for isize","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Rem<usize> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Rem<Simd<[usize; 2]>> for usize","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Rem<isize> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Rem<Simd<[isize; 4]>> for isize","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Rem<usize> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Rem<Simd<[usize; 4]>> for usize","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Rem<isize> for 
isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Rem<Simd<[isize; 8]>> for isize","synthetic":false,"types":[]},{"text":"impl Rem<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl Rem<usize> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl Rem<Simd<[usize; 8]>> for usize","synthetic":false,"types":[]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/arith/trait.RemAssign.js b/implementors/core/ops/arith/trait.RemAssign.js new file mode 100644 index 000000000..5164e3789 --- /dev/null +++ b/implementors/core/ops/arith/trait.RemAssign.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl RemAssign<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl RemAssign<i8> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl RemAssign<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl RemAssign<u8> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl RemAssign<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl RemAssign<i8> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl RemAssign<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl RemAssign<u8> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl RemAssign<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl RemAssign<i16> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl RemAssign<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl RemAssign<u16> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl RemAssign<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl RemAssign<i8> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl RemAssign<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl RemAssign<u8> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl RemAssign<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl RemAssign<i16> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl RemAssign<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl RemAssign<u16> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl RemAssign<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl RemAssign<i32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl RemAssign<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl RemAssign<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl RemAssign<Simd<[f32; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl RemAssign<f32> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl RemAssign<Simd<[i8; 16]>> for 
i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl RemAssign<i8> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl RemAssign<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl RemAssign<u8> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl RemAssign<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl RemAssign<i16> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl RemAssign<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl RemAssign<u16> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl RemAssign<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl RemAssign<i32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl RemAssign<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl RemAssign<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl RemAssign<Simd<[f32; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl RemAssign<f32> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl RemAssign<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl RemAssign<i64> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl RemAssign<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl RemAssign<u64> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl RemAssign<Simd<[f64; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl RemAssign<f64> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl RemAssign<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl RemAssign<i128> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl RemAssign<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl RemAssign<u128> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl RemAssign<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl RemAssign<i8> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl RemAssign<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl RemAssign<u8> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl RemAssign<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl RemAssign<i16> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl RemAssign<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl RemAssign<u16> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl RemAssign<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl RemAssign<i32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl RemAssign<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl 
RemAssign<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl RemAssign<Simd<[f32; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl RemAssign<f32> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl RemAssign<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl RemAssign<i64> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl RemAssign<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl RemAssign<u64> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl RemAssign<Simd<[f64; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl RemAssign<f64> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl RemAssign<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl RemAssign<i128> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl RemAssign<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl RemAssign<u128> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl RemAssign<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl RemAssign<i8> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl RemAssign<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl RemAssign<u8> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl RemAssign<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl RemAssign<i16> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl RemAssign<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl RemAssign<u16> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl RemAssign<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl RemAssign<i32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl RemAssign<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl RemAssign<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl RemAssign<Simd<[f32; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl RemAssign<f32> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl RemAssign<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl RemAssign<i64> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl RemAssign<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl RemAssign<u64> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl RemAssign<Simd<[f64; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl RemAssign<f64> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl RemAssign<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl RemAssign<i128> for 
i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl RemAssign<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl RemAssign<u128> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl RemAssign<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl RemAssign<isize> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl RemAssign<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl RemAssign<usize> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl RemAssign<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl RemAssign<isize> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl RemAssign<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl RemAssign<usize> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl RemAssign<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl RemAssign<isize> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl RemAssign<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl RemAssign<usize> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/arith/trait.Sub.js b/implementors/core/ops/arith/trait.Sub.js new file mode 100644 index 000000000..4f41d07b9 --- /dev/null +++ b/implementors/core/ops/arith/trait.Sub.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Sub<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Sub<i8> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Sub<Simd<[i8; 2]>> for i8","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Sub<u8> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Sub<Simd<[u8; 2]>> for u8","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Sub<i8> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Sub<Simd<[i8; 4]>> for i8","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Sub<u8> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Sub<Simd<[u8; 4]>> for u8","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Sub<i16> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Sub<Simd<[i16; 2]>> for i16","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Sub<u16> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Sub<Simd<[u16; 2]>> for 
u16","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Sub<i8> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Sub<Simd<[i8; 8]>> for i8","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Sub<u8> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Sub<Simd<[u8; 8]>> for u8","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Sub<i16> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Sub<Simd<[i16; 4]>> for i16","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Sub<u16> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Sub<Simd<[u16; 4]>> for u16","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Sub<i32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Sub<Simd<[i32; 2]>> for i32","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Sub<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Sub<Simd<[u32; 2]>> for u32","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[f32; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl Sub<f32> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl Sub<Simd<[f32; 2]>> for f32","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Sub<i8> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Sub<Simd<[i8; 16]>> for i8","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Sub<u8> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Sub<Simd<[u8; 16]>> for u8","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Sub<i16> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Sub<Simd<[i16; 8]>> for i16","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Sub<u16> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Sub<Simd<[u16; 8]>> for u16","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Sub<i32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Sub<Simd<[i32; 4]>> for i32","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Sub<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Sub<Simd<[u32; 4]>> for u32","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[f32; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl 
Sub<f32> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl Sub<Simd<[f32; 4]>> for f32","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Sub<i64> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Sub<Simd<[i64; 2]>> for i64","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Sub<u64> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Sub<Simd<[u64; 2]>> for u64","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[f64; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl Sub<f64> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl Sub<Simd<[f64; 2]>> for f64","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Sub<i128> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Sub<Simd<[i128; 1]>> for i128","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Sub<u128> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Sub<Simd<[u128; 1]>> for u128","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Sub<i8> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Sub<Simd<[i8; 32]>> for i8","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Sub<u8> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Sub<Simd<[u8; 32]>> for u8","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Sub<i16> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Sub<Simd<[i16; 16]>> for i16","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Sub<u16> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Sub<Simd<[u16; 16]>> for u16","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Sub<i32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Sub<Simd<[i32; 8]>> for i32","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Sub<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Sub<Simd<[u32; 8]>> for u32","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[f32; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl Sub<f32> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl Sub<Simd<[f32; 8]>> for f32","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Sub<i64> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl 
Sub<Simd<[i64; 4]>> for i64","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Sub<u64> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Sub<Simd<[u64; 4]>> for u64","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[f64; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl Sub<f64> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl Sub<Simd<[f64; 4]>> for f64","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Sub<i128> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Sub<Simd<[i128; 2]>> for i128","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Sub<u128> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Sub<Simd<[u128; 2]>> for u128","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Sub<i8> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Sub<Simd<[i8; 64]>> for i8","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Sub<u8> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Sub<Simd<[u8; 64]>> for u8","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Sub<i16> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Sub<Simd<[i16; 32]>> for i16","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Sub<u16> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Sub<Simd<[u16; 32]>> for u16","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Sub<i32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Sub<Simd<[i32; 16]>> for i32","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Sub<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Sub<Simd<[u32; 16]>> for u32","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[f32; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl Sub<f32> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl Sub<Simd<[f32; 16]>> for f32","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Sub<i64> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Sub<Simd<[i64; 8]>> for i64","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Sub<u64> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Sub<Simd<[u64; 8]>> for u64","synthetic":false,"types":[]},{"text":"impl 
Sub<Simd<[f64; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl Sub<f64> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl Sub<Simd<[f64; 8]>> for f64","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Sub<i128> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Sub<Simd<[i128; 4]>> for i128","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Sub<u128> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Sub<Simd<[u128; 4]>> for u128","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Sub<isize> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Sub<Simd<[isize; 2]>> for isize","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Sub<usize> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Sub<Simd<[usize; 2]>> for usize","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Sub<isize> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Sub<Simd<[isize; 4]>> for isize","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Sub<usize> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Sub<Simd<[usize; 4]>> for usize","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Sub<isize> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Sub<Simd<[isize; 8]>> for isize","synthetic":false,"types":[]},{"text":"impl Sub<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl Sub<usize> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl Sub<Simd<[usize; 8]>> for usize","synthetic":false,"types":[]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/arith/trait.SubAssign.js b/implementors/core/ops/arith/trait.SubAssign.js new file mode 100644 index 000000000..ac4e77ced --- /dev/null +++ b/implementors/core/ops/arith/trait.SubAssign.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl SubAssign<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl SubAssign<i8> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl SubAssign<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl SubAssign<u8> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl SubAssign<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl SubAssign<i8> for 
i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl SubAssign<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl SubAssign<u8> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl SubAssign<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl SubAssign<i16> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl SubAssign<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl SubAssign<u16> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl SubAssign<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl SubAssign<i8> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl SubAssign<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl SubAssign<u8> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl SubAssign<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl SubAssign<i16> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl SubAssign<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl SubAssign<u16> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl SubAssign<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl SubAssign<i32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl SubAssign<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl SubAssign<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl SubAssign<Simd<[f32; 2]>> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl SubAssign<f32> for f32x2","synthetic":false,"types":["packed_simd::v64::f32x2"]},{"text":"impl SubAssign<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl SubAssign<i8> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl SubAssign<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl SubAssign<u8> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl SubAssign<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl SubAssign<i16> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl SubAssign<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl SubAssign<u16> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl SubAssign<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl SubAssign<i32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl SubAssign<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl SubAssign<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl SubAssign<Simd<[f32; 4]>> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl SubAssign<f32> for f32x4","synthetic":false,"types":["packed_simd::v128::f32x4"]},{"text":"impl SubAssign<Simd<[i64; 2]>> for 
i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl SubAssign<i64> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl SubAssign<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl SubAssign<u64> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl SubAssign<Simd<[f64; 2]>> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl SubAssign<f64> for f64x2","synthetic":false,"types":["packed_simd::v128::f64x2"]},{"text":"impl SubAssign<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl SubAssign<i128> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl SubAssign<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl SubAssign<u128> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl SubAssign<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl SubAssign<i8> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl SubAssign<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl SubAssign<u8> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl SubAssign<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl SubAssign<i16> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl SubAssign<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl SubAssign<u16> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl SubAssign<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl SubAssign<i32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl SubAssign<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl SubAssign<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl SubAssign<Simd<[f32; 8]>> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl SubAssign<f32> for f32x8","synthetic":false,"types":["packed_simd::v256::f32x8"]},{"text":"impl SubAssign<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl SubAssign<i64> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl SubAssign<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl SubAssign<u64> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl SubAssign<Simd<[f64; 4]>> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl SubAssign<f64> for f64x4","synthetic":false,"types":["packed_simd::v256::f64x4"]},{"text":"impl SubAssign<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl SubAssign<i128> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl SubAssign<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl SubAssign<u128> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl SubAssign<Simd<[i8; 64]>> for 
i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl SubAssign<i8> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl SubAssign<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl SubAssign<u8> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl SubAssign<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl SubAssign<i16> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl SubAssign<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl SubAssign<u16> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl SubAssign<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl SubAssign<i32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl SubAssign<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl SubAssign<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl SubAssign<Simd<[f32; 16]>> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl SubAssign<f32> for f32x16","synthetic":false,"types":["packed_simd::v512::f32x16"]},{"text":"impl SubAssign<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl SubAssign<i64> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl SubAssign<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl SubAssign<u64> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl SubAssign<Simd<[f64; 8]>> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl SubAssign<f64> for f64x8","synthetic":false,"types":["packed_simd::v512::f64x8"]},{"text":"impl SubAssign<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl SubAssign<i128> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl SubAssign<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl SubAssign<u128> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl SubAssign<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl SubAssign<isize> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl SubAssign<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl SubAssign<usize> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl SubAssign<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl SubAssign<isize> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl SubAssign<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl SubAssign<usize> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl SubAssign<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl SubAssign<isize> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl SubAssign<Simd<[usize; 8]>> 
for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl SubAssign<usize> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/bit/trait.BitAnd.js b/implementors/core/ops/bit/trait.BitAnd.js new file mode 100644 index 000000000..23d46c43a --- /dev/null +++ b/implementors/core/ops/bit/trait.BitAnd.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl BitAnd<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl BitAnd<i8> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl BitAnd<Simd<[i8; 2]>> for i8","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl BitAnd<u8> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl BitAnd<Simd<[u8; 2]>> for u8","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m8; 2]>> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl BitAnd<bool> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl BitAnd<Simd<[m8; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl BitAnd<i8> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl BitAnd<Simd<[i8; 4]>> for i8","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl BitAnd<u8> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl BitAnd<Simd<[u8; 4]>> for u8","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m8; 4]>> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl BitAnd<bool> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl BitAnd<Simd<[m8; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl BitAnd<i16> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl BitAnd<Simd<[i16; 2]>> for i16","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl BitAnd<u16> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl BitAnd<Simd<[u16; 2]>> for u16","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m16; 2]>> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl BitAnd<bool> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl BitAnd<Simd<[m16; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl BitAnd<i8> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl BitAnd<Simd<[i8; 8]>> for i8","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl BitAnd<u8> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl BitAnd<Simd<[u8; 8]>> for 
u8","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m8; 8]>> for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl BitAnd<bool> for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl BitAnd<Simd<[m8; 8]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl BitAnd<i16> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl BitAnd<Simd<[i16; 4]>> for i16","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl BitAnd<u16> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl BitAnd<Simd<[u16; 4]>> for u16","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m16; 4]>> for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl BitAnd<bool> for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl BitAnd<Simd<[m16; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl BitAnd<i32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl BitAnd<Simd<[i32; 2]>> for i32","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl BitAnd<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl BitAnd<Simd<[u32; 2]>> for u32","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m32; 2]>> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl BitAnd<bool> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl BitAnd<Simd<[m32; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl BitAnd<i8> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl BitAnd<Simd<[i8; 16]>> for i8","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl BitAnd<u8> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl BitAnd<Simd<[u8; 16]>> for u8","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m8; 16]>> for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl BitAnd<bool> for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl BitAnd<Simd<[m8; 16]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl BitAnd<i16> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl BitAnd<Simd<[i16; 8]>> for i16","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl BitAnd<u16> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl BitAnd<Simd<[u16; 8]>> for u16","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m16; 8]>> for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl BitAnd<bool> for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl BitAnd<Simd<[m16; 8]>> for 
bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl BitAnd<i32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl BitAnd<Simd<[i32; 4]>> for i32","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl BitAnd<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl BitAnd<Simd<[u32; 4]>> for u32","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m32; 4]>> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl BitAnd<bool> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl BitAnd<Simd<[m32; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl BitAnd<i64> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl BitAnd<Simd<[i64; 2]>> for i64","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl BitAnd<u64> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl BitAnd<Simd<[u64; 2]>> for u64","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m64; 2]>> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl BitAnd<bool> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl BitAnd<Simd<[m64; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl BitAnd<i128> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl BitAnd<Simd<[i128; 1]>> for i128","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl BitAnd<u128> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl BitAnd<Simd<[u128; 1]>> for u128","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m128; 1]>> for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl BitAnd<bool> for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl BitAnd<Simd<[m128; 1]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl BitAnd<i8> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl BitAnd<Simd<[i8; 32]>> for i8","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl BitAnd<u8> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl BitAnd<Simd<[u8; 32]>> for u8","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m8; 32]>> for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl BitAnd<bool> for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl BitAnd<Simd<[m8; 32]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl BitAnd<i16> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl 
BitAnd<Simd<[i16; 16]>> for i16","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl BitAnd<u16> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl BitAnd<Simd<[u16; 16]>> for u16","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m16; 16]>> for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl BitAnd<bool> for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl BitAnd<Simd<[m16; 16]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl BitAnd<i32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl BitAnd<Simd<[i32; 8]>> for i32","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl BitAnd<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl BitAnd<Simd<[u32; 8]>> for u32","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m32; 8]>> for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl BitAnd<bool> for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl BitAnd<Simd<[m32; 8]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl BitAnd<i64> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl BitAnd<Simd<[i64; 4]>> for i64","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl BitAnd<u64> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl BitAnd<Simd<[u64; 4]>> for u64","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m64; 4]>> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl BitAnd<bool> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl BitAnd<Simd<[m64; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl BitAnd<i128> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl BitAnd<Simd<[i128; 2]>> for i128","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl BitAnd<u128> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl BitAnd<Simd<[u128; 2]>> for u128","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m128; 2]>> for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl BitAnd<bool> for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl BitAnd<Simd<[m128; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl BitAnd<i8> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl BitAnd<Simd<[i8; 64]>> for i8","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl BitAnd<u8> for 
u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl BitAnd<Simd<[u8; 64]>> for u8","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m8; 64]>> for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl BitAnd<bool> for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl BitAnd<Simd<[m8; 64]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl BitAnd<i16> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl BitAnd<Simd<[i16; 32]>> for i16","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl BitAnd<u16> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl BitAnd<Simd<[u16; 32]>> for u16","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m16; 32]>> for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl BitAnd<bool> for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl BitAnd<Simd<[m16; 32]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl BitAnd<i32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl BitAnd<Simd<[i32; 16]>> for i32","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl BitAnd<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl BitAnd<Simd<[u32; 16]>> for u32","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m32; 16]>> for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl BitAnd<bool> for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl BitAnd<Simd<[m32; 16]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl BitAnd<i64> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl BitAnd<Simd<[i64; 8]>> for i64","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl BitAnd<u64> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl BitAnd<Simd<[u64; 8]>> for u64","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m64; 8]>> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl BitAnd<bool> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl BitAnd<Simd<[m64; 8]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl BitAnd<i128> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl BitAnd<Simd<[i128; 4]>> for i128","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl BitAnd<u128> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl BitAnd<Simd<[u128; 4]>> for u128","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[m128; 4]>> for 
m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl BitAnd<bool> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl BitAnd<Simd<[m128; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl BitAnd<isize> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl BitAnd<Simd<[isize; 2]>> for isize","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl BitAnd<usize> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl BitAnd<Simd<[usize; 2]>> for usize","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[msize; 2]>> for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl BitAnd<bool> for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl BitAnd<Simd<[msize; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl BitAnd<isize> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl BitAnd<Simd<[isize; 4]>> for isize","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl BitAnd<usize> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl BitAnd<Simd<[usize; 4]>> for usize","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[msize; 4]>> for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl BitAnd<bool> for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl BitAnd<Simd<[msize; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl BitAnd<isize> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl BitAnd<Simd<[isize; 8]>> for isize","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl BitAnd<usize> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl BitAnd<Simd<[usize; 8]>> for usize","synthetic":false,"types":[]},{"text":"impl BitAnd<Simd<[msize; 8]>> for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]},{"text":"impl BitAnd<bool> for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]},{"text":"impl BitAnd<Simd<[msize; 8]>> for bool","synthetic":false,"types":[]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/bit/trait.BitAndAssign.js b/implementors/core/ops/bit/trait.BitAndAssign.js new file mode 100644 index 000000000..34662bf92 --- /dev/null +++ b/implementors/core/ops/bit/trait.BitAndAssign.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl BitAndAssign<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl BitAndAssign<i8> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl 
BitAndAssign<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl BitAndAssign<u8> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl BitAndAssign<Simd<[m8; 2]>> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl BitAndAssign<bool> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl BitAndAssign<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl BitAndAssign<i8> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl BitAndAssign<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl BitAndAssign<u8> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl BitAndAssign<Simd<[m8; 4]>> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl BitAndAssign<bool> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl BitAndAssign<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl BitAndAssign<i16> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl BitAndAssign<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl BitAndAssign<u16> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl BitAndAssign<Simd<[m16; 2]>> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl BitAndAssign<bool> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl BitAndAssign<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl BitAndAssign<i8> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl BitAndAssign<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl BitAndAssign<u8> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl BitAndAssign<Simd<[m8; 8]>> for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl BitAndAssign<bool> for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl BitAndAssign<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl BitAndAssign<i16> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl BitAndAssign<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl BitAndAssign<u16> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl BitAndAssign<Simd<[m16; 4]>> for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl BitAndAssign<bool> for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl BitAndAssign<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl BitAndAssign<i32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl BitAndAssign<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl BitAndAssign<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl BitAndAssign<Simd<[m32; 2]>> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl BitAndAssign<bool> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl BitAndAssign<Simd<[i8; 16]>> for 
i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl BitAndAssign<i8> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl BitAndAssign<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl BitAndAssign<u8> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl BitAndAssign<Simd<[m8; 16]>> for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl BitAndAssign<bool> for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl BitAndAssign<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl BitAndAssign<i16> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl BitAndAssign<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl BitAndAssign<u16> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl BitAndAssign<Simd<[m16; 8]>> for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl BitAndAssign<bool> for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl BitAndAssign<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl BitAndAssign<i32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl BitAndAssign<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl BitAndAssign<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl BitAndAssign<Simd<[m32; 4]>> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl BitAndAssign<bool> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl BitAndAssign<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl BitAndAssign<i64> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl BitAndAssign<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl BitAndAssign<u64> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl BitAndAssign<Simd<[m64; 2]>> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl BitAndAssign<bool> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl BitAndAssign<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl BitAndAssign<i128> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl BitAndAssign<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl BitAndAssign<u128> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl BitAndAssign<Simd<[m128; 1]>> for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl BitAndAssign<bool> for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl BitAndAssign<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl BitAndAssign<i8> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl BitAndAssign<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl BitAndAssign<u8> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl 
BitAndAssign<Simd<[m8; 32]>> for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl BitAndAssign<bool> for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl BitAndAssign<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl BitAndAssign<i16> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl BitAndAssign<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl BitAndAssign<u16> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl BitAndAssign<Simd<[m16; 16]>> for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl BitAndAssign<bool> for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl BitAndAssign<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl BitAndAssign<i32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl BitAndAssign<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl BitAndAssign<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl BitAndAssign<Simd<[m32; 8]>> for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl BitAndAssign<bool> for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl BitAndAssign<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl BitAndAssign<i64> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl BitAndAssign<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl BitAndAssign<u64> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl BitAndAssign<Simd<[m64; 4]>> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl BitAndAssign<bool> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl BitAndAssign<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl BitAndAssign<i128> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl BitAndAssign<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl BitAndAssign<u128> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl BitAndAssign<Simd<[m128; 2]>> for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl BitAndAssign<bool> for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl BitAndAssign<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl BitAndAssign<i8> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl BitAndAssign<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl BitAndAssign<u8> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl BitAndAssign<Simd<[m8; 64]>> for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl BitAndAssign<bool> for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl BitAndAssign<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl BitAndAssign<i16> for 
i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl BitAndAssign<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl BitAndAssign<u16> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl BitAndAssign<Simd<[m16; 32]>> for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl BitAndAssign<bool> for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl BitAndAssign<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl BitAndAssign<i32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl BitAndAssign<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl BitAndAssign<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl BitAndAssign<Simd<[m32; 16]>> for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl BitAndAssign<bool> for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl BitAndAssign<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl BitAndAssign<i64> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl BitAndAssign<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl BitAndAssign<u64> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl BitAndAssign<Simd<[m64; 8]>> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl BitAndAssign<bool> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl BitAndAssign<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl BitAndAssign<i128> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl BitAndAssign<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl BitAndAssign<u128> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl BitAndAssign<Simd<[m128; 4]>> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl BitAndAssign<bool> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl BitAndAssign<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl BitAndAssign<isize> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl BitAndAssign<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl BitAndAssign<usize> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl BitAndAssign<Simd<[msize; 2]>> for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl BitAndAssign<bool> for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl BitAndAssign<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl BitAndAssign<isize> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl BitAndAssign<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl BitAndAssign<usize> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl 
BitAndAssign<Simd<[msize; 4]>> for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl BitAndAssign<bool> for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl BitAndAssign<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl BitAndAssign<isize> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl BitAndAssign<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl BitAndAssign<usize> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl BitAndAssign<Simd<[msize; 8]>> for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]},{"text":"impl BitAndAssign<bool> for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/bit/trait.BitOr.js b/implementors/core/ops/bit/trait.BitOr.js new file mode 100644 index 000000000..5aa6b22c8 --- /dev/null +++ b/implementors/core/ops/bit/trait.BitOr.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl BitOr<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl BitOr<i8> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl BitOr<Simd<[i8; 2]>> for i8","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl BitOr<u8> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl BitOr<Simd<[u8; 2]>> for u8","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m8; 2]>> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl BitOr<bool> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl BitOr<Simd<[m8; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl BitOr<i8> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl BitOr<Simd<[i8; 4]>> for i8","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl BitOr<u8> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl BitOr<Simd<[u8; 4]>> for u8","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m8; 4]>> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl BitOr<bool> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl BitOr<Simd<[m8; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl BitOr<i16> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl BitOr<Simd<[i16; 2]>> for i16","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl BitOr<u16> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl BitOr<Simd<[u16; 2]>> for u16","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m16; 2]>> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl 
BitOr<bool> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl BitOr<Simd<[m16; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl BitOr<i8> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl BitOr<Simd<[i8; 8]>> for i8","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl BitOr<u8> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl BitOr<Simd<[u8; 8]>> for u8","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m8; 8]>> for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl BitOr<bool> for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl BitOr<Simd<[m8; 8]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl BitOr<i16> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl BitOr<Simd<[i16; 4]>> for i16","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl BitOr<u16> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl BitOr<Simd<[u16; 4]>> for u16","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m16; 4]>> for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl BitOr<bool> for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl BitOr<Simd<[m16; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl BitOr<i32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl BitOr<Simd<[i32; 2]>> for i32","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl BitOr<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl BitOr<Simd<[u32; 2]>> for u32","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m32; 2]>> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl BitOr<bool> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl BitOr<Simd<[m32; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl BitOr<i8> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl BitOr<Simd<[i8; 16]>> for i8","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl BitOr<u8> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl BitOr<Simd<[u8; 16]>> for u8","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m8; 16]>> for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl BitOr<bool> for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl BitOr<Simd<[m8; 16]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl BitOr<i16> for 
i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl BitOr<Simd<[i16; 8]>> for i16","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl BitOr<u16> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl BitOr<Simd<[u16; 8]>> for u16","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m16; 8]>> for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl BitOr<bool> for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl BitOr<Simd<[m16; 8]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl BitOr<i32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl BitOr<Simd<[i32; 4]>> for i32","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl BitOr<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl BitOr<Simd<[u32; 4]>> for u32","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m32; 4]>> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl BitOr<bool> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl BitOr<Simd<[m32; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl BitOr<i64> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl BitOr<Simd<[i64; 2]>> for i64","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl BitOr<u64> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl BitOr<Simd<[u64; 2]>> for u64","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m64; 2]>> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl BitOr<bool> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl BitOr<Simd<[m64; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl BitOr<i128> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl BitOr<Simd<[i128; 1]>> for i128","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl BitOr<u128> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl BitOr<Simd<[u128; 1]>> for u128","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m128; 1]>> for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl BitOr<bool> for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl BitOr<Simd<[m128; 1]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl BitOr<i8> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl BitOr<Simd<[i8; 32]>> for i8","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl BitOr<u8> for 
u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl BitOr<Simd<[u8; 32]>> for u8","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m8; 32]>> for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl BitOr<bool> for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl BitOr<Simd<[m8; 32]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl BitOr<i16> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl BitOr<Simd<[i16; 16]>> for i16","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl BitOr<u16> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl BitOr<Simd<[u16; 16]>> for u16","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m16; 16]>> for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl BitOr<bool> for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl BitOr<Simd<[m16; 16]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl BitOr<i32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl BitOr<Simd<[i32; 8]>> for i32","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl BitOr<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl BitOr<Simd<[u32; 8]>> for u32","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m32; 8]>> for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl BitOr<bool> for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl BitOr<Simd<[m32; 8]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl BitOr<i64> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl BitOr<Simd<[i64; 4]>> for i64","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl BitOr<u64> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl BitOr<Simd<[u64; 4]>> for u64","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m64; 4]>> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl BitOr<bool> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl BitOr<Simd<[m64; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl BitOr<i128> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl BitOr<Simd<[i128; 2]>> for i128","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl BitOr<u128> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl BitOr<Simd<[u128; 2]>> for u128","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m128; 2]>> for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl BitOr<bool> for 
m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl BitOr<Simd<[m128; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl BitOr<i8> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl BitOr<Simd<[i8; 64]>> for i8","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl BitOr<u8> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl BitOr<Simd<[u8; 64]>> for u8","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m8; 64]>> for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl BitOr<bool> for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl BitOr<Simd<[m8; 64]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl BitOr<i16> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl BitOr<Simd<[i16; 32]>> for i16","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl BitOr<u16> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl BitOr<Simd<[u16; 32]>> for u16","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m16; 32]>> for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl BitOr<bool> for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl BitOr<Simd<[m16; 32]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl BitOr<i32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl BitOr<Simd<[i32; 16]>> for i32","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl BitOr<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl BitOr<Simd<[u32; 16]>> for u32","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m32; 16]>> for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl BitOr<bool> for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl BitOr<Simd<[m32; 16]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl BitOr<i64> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl BitOr<Simd<[i64; 8]>> for i64","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl BitOr<u64> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl BitOr<Simd<[u64; 8]>> for u64","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m64; 8]>> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl BitOr<bool> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl BitOr<Simd<[m64; 8]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl BitOr<i128> 
for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl BitOr<Simd<[i128; 4]>> for i128","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl BitOr<u128> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl BitOr<Simd<[u128; 4]>> for u128","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[m128; 4]>> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl BitOr<bool> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl BitOr<Simd<[m128; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl BitOr<isize> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl BitOr<Simd<[isize; 2]>> for isize","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl BitOr<usize> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl BitOr<Simd<[usize; 2]>> for usize","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[msize; 2]>> for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl BitOr<bool> for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl BitOr<Simd<[msize; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl BitOr<isize> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl BitOr<Simd<[isize; 4]>> for isize","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl BitOr<usize> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl BitOr<Simd<[usize; 4]>> for usize","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[msize; 4]>> for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl BitOr<bool> for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl BitOr<Simd<[msize; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl BitOr<isize> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl BitOr<Simd<[isize; 8]>> for isize","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl BitOr<usize> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl BitOr<Simd<[usize; 8]>> for usize","synthetic":false,"types":[]},{"text":"impl BitOr<Simd<[msize; 8]>> for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]},{"text":"impl BitOr<bool> for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]},{"text":"impl BitOr<Simd<[msize; 8]>> for bool","synthetic":false,"types":[]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/bit/trait.BitOrAssign.js 
b/implementors/core/ops/bit/trait.BitOrAssign.js new file mode 100644 index 000000000..437c3cae4 --- /dev/null +++ b/implementors/core/ops/bit/trait.BitOrAssign.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl BitOrAssign<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl BitOrAssign<i8> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl BitOrAssign<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl BitOrAssign<u8> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl BitOrAssign<Simd<[m8; 2]>> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl BitOrAssign<bool> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl BitOrAssign<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl BitOrAssign<i8> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl BitOrAssign<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl BitOrAssign<u8> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl BitOrAssign<Simd<[m8; 4]>> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl BitOrAssign<bool> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl BitOrAssign<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl BitOrAssign<i16> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl BitOrAssign<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl BitOrAssign<u16> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl BitOrAssign<Simd<[m16; 2]>> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl BitOrAssign<bool> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl BitOrAssign<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl BitOrAssign<i8> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl BitOrAssign<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl BitOrAssign<u8> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl BitOrAssign<Simd<[m8; 8]>> for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl BitOrAssign<bool> for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl BitOrAssign<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl BitOrAssign<i16> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl BitOrAssign<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl BitOrAssign<u16> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl BitOrAssign<Simd<[m16; 4]>> for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl BitOrAssign<bool> for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl BitOrAssign<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl BitOrAssign<i32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl BitOrAssign<Simd<[u32; 2]>> for 
u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl BitOrAssign<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl BitOrAssign<Simd<[m32; 2]>> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl BitOrAssign<bool> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl BitOrAssign<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl BitOrAssign<i8> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl BitOrAssign<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl BitOrAssign<u8> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl BitOrAssign<Simd<[m8; 16]>> for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl BitOrAssign<bool> for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl BitOrAssign<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl BitOrAssign<i16> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl BitOrAssign<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl BitOrAssign<u16> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl BitOrAssign<Simd<[m16; 8]>> for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl BitOrAssign<bool> for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl BitOrAssign<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl BitOrAssign<i32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl BitOrAssign<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl BitOrAssign<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl BitOrAssign<Simd<[m32; 4]>> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl BitOrAssign<bool> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl BitOrAssign<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl BitOrAssign<i64> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl BitOrAssign<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl BitOrAssign<u64> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl BitOrAssign<Simd<[m64; 2]>> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl BitOrAssign<bool> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl BitOrAssign<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl BitOrAssign<i128> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl BitOrAssign<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl BitOrAssign<u128> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl BitOrAssign<Simd<[m128; 1]>> for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl BitOrAssign<bool> for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl BitOrAssign<Simd<[i8; 32]>> for 
i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl BitOrAssign<i8> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl BitOrAssign<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl BitOrAssign<u8> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl BitOrAssign<Simd<[m8; 32]>> for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl BitOrAssign<bool> for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl BitOrAssign<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl BitOrAssign<i16> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl BitOrAssign<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl BitOrAssign<u16> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl BitOrAssign<Simd<[m16; 16]>> for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl BitOrAssign<bool> for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl BitOrAssign<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl BitOrAssign<i32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl BitOrAssign<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl BitOrAssign<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl BitOrAssign<Simd<[m32; 8]>> for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl BitOrAssign<bool> for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl BitOrAssign<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl BitOrAssign<i64> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl BitOrAssign<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl BitOrAssign<u64> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl BitOrAssign<Simd<[m64; 4]>> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl BitOrAssign<bool> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl BitOrAssign<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl BitOrAssign<i128> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl BitOrAssign<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl BitOrAssign<u128> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl BitOrAssign<Simd<[m128; 2]>> for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl BitOrAssign<bool> for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl BitOrAssign<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl BitOrAssign<i8> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl BitOrAssign<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl BitOrAssign<u8> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl BitOrAssign<Simd<[m8; 64]>> for 
m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl BitOrAssign<bool> for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl BitOrAssign<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl BitOrAssign<i16> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl BitOrAssign<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl BitOrAssign<u16> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl BitOrAssign<Simd<[m16; 32]>> for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl BitOrAssign<bool> for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl BitOrAssign<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl BitOrAssign<i32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl BitOrAssign<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl BitOrAssign<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl BitOrAssign<Simd<[m32; 16]>> for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl BitOrAssign<bool> for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl BitOrAssign<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl BitOrAssign<i64> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl BitOrAssign<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl BitOrAssign<u64> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl BitOrAssign<Simd<[m64; 8]>> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl BitOrAssign<bool> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl BitOrAssign<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl BitOrAssign<i128> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl BitOrAssign<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl BitOrAssign<u128> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl BitOrAssign<Simd<[m128; 4]>> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl BitOrAssign<bool> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl BitOrAssign<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl BitOrAssign<isize> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl BitOrAssign<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl BitOrAssign<usize> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl BitOrAssign<Simd<[msize; 2]>> for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl BitOrAssign<bool> for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl BitOrAssign<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl BitOrAssign<isize> for 
isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl BitOrAssign<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl BitOrAssign<usize> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl BitOrAssign<Simd<[msize; 4]>> for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl BitOrAssign<bool> for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl BitOrAssign<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl BitOrAssign<isize> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl BitOrAssign<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl BitOrAssign<usize> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl BitOrAssign<Simd<[msize; 8]>> for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]},{"text":"impl BitOrAssign<bool> for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/bit/trait.BitXor.js b/implementors/core/ops/bit/trait.BitXor.js new file mode 100644 index 000000000..c6ee6af8c --- /dev/null +++ b/implementors/core/ops/bit/trait.BitXor.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl BitXor<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl BitXor<i8> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl BitXor<Simd<[i8; 2]>> for i8","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl BitXor<u8> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl BitXor<Simd<[u8; 2]>> for u8","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m8; 2]>> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl BitXor<bool> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl BitXor<Simd<[m8; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl BitXor<i8> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl BitXor<Simd<[i8; 4]>> for i8","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl BitXor<u8> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl BitXor<Simd<[u8; 4]>> for u8","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m8; 4]>> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl BitXor<bool> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl BitXor<Simd<[m8; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl BitXor<i16> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl BitXor<Simd<[i16; 2]>> for i16","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u16; 2]>> for 
u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl BitXor<u16> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl BitXor<Simd<[u16; 2]>> for u16","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m16; 2]>> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl BitXor<bool> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl BitXor<Simd<[m16; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl BitXor<i8> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl BitXor<Simd<[i8; 8]>> for i8","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl BitXor<u8> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl BitXor<Simd<[u8; 8]>> for u8","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m8; 8]>> for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl BitXor<bool> for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl BitXor<Simd<[m8; 8]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl BitXor<i16> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl BitXor<Simd<[i16; 4]>> for i16","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl BitXor<u16> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl BitXor<Simd<[u16; 4]>> for u16","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m16; 4]>> for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl BitXor<bool> for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl BitXor<Simd<[m16; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl BitXor<i32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl BitXor<Simd<[i32; 2]>> for i32","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl BitXor<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl BitXor<Simd<[u32; 2]>> for u32","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m32; 2]>> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl BitXor<bool> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl BitXor<Simd<[m32; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl BitXor<i8> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl BitXor<Simd<[i8; 16]>> for i8","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl BitXor<u8> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl BitXor<Simd<[u8; 16]>> for u8","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m8; 16]>> for 
m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl BitXor<bool> for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl BitXor<Simd<[m8; 16]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl BitXor<i16> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl BitXor<Simd<[i16; 8]>> for i16","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl BitXor<u16> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl BitXor<Simd<[u16; 8]>> for u16","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m16; 8]>> for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl BitXor<bool> for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl BitXor<Simd<[m16; 8]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl BitXor<i32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl BitXor<Simd<[i32; 4]>> for i32","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl BitXor<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl BitXor<Simd<[u32; 4]>> for u32","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m32; 4]>> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl BitXor<bool> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl BitXor<Simd<[m32; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl BitXor<i64> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl BitXor<Simd<[i64; 2]>> for i64","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl BitXor<u64> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl BitXor<Simd<[u64; 2]>> for u64","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m64; 2]>> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl BitXor<bool> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl BitXor<Simd<[m64; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl BitXor<i128> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl BitXor<Simd<[i128; 1]>> for i128","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl BitXor<u128> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl BitXor<Simd<[u128; 1]>> for u128","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m128; 1]>> for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl BitXor<bool> for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl BitXor<Simd<[m128; 1]>> for bool","synthetic":false,"types":[]},{"text":"impl 
BitXor<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl BitXor<i8> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl BitXor<Simd<[i8; 32]>> for i8","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl BitXor<u8> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl BitXor<Simd<[u8; 32]>> for u8","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m8; 32]>> for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl BitXor<bool> for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl BitXor<Simd<[m8; 32]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl BitXor<i16> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl BitXor<Simd<[i16; 16]>> for i16","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl BitXor<u16> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl BitXor<Simd<[u16; 16]>> for u16","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m16; 16]>> for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl BitXor<bool> for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl BitXor<Simd<[m16; 16]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl BitXor<i32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl BitXor<Simd<[i32; 8]>> for i32","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl BitXor<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl BitXor<Simd<[u32; 8]>> for u32","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m32; 8]>> for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl BitXor<bool> for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl BitXor<Simd<[m32; 8]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl BitXor<i64> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl BitXor<Simd<[i64; 4]>> for i64","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl BitXor<u64> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl BitXor<Simd<[u64; 4]>> for u64","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m64; 4]>> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl BitXor<bool> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl BitXor<Simd<[m64; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl BitXor<i128> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl BitXor<Simd<[i128; 2]>> for 
i128","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl BitXor<u128> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl BitXor<Simd<[u128; 2]>> for u128","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m128; 2]>> for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl BitXor<bool> for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl BitXor<Simd<[m128; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl BitXor<i8> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl BitXor<Simd<[i8; 64]>> for i8","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl BitXor<u8> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl BitXor<Simd<[u8; 64]>> for u8","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m8; 64]>> for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl BitXor<bool> for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl BitXor<Simd<[m8; 64]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl BitXor<i16> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl BitXor<Simd<[i16; 32]>> for i16","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl BitXor<u16> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl BitXor<Simd<[u16; 32]>> for u16","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m16; 32]>> for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl BitXor<bool> for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl BitXor<Simd<[m16; 32]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl BitXor<i32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl BitXor<Simd<[i32; 16]>> for i32","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl BitXor<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl BitXor<Simd<[u32; 16]>> for u32","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m32; 16]>> for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl BitXor<bool> for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl BitXor<Simd<[m32; 16]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl BitXor<i64> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl BitXor<Simd<[i64; 8]>> for i64","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl BitXor<u64> for 
u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl BitXor<Simd<[u64; 8]>> for u64","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m64; 8]>> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl BitXor<bool> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl BitXor<Simd<[m64; 8]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl BitXor<i128> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl BitXor<Simd<[i128; 4]>> for i128","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl BitXor<u128> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl BitXor<Simd<[u128; 4]>> for u128","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[m128; 4]>> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl BitXor<bool> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl BitXor<Simd<[m128; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl BitXor<isize> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl BitXor<Simd<[isize; 2]>> for isize","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl BitXor<usize> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl BitXor<Simd<[usize; 2]>> for usize","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[msize; 2]>> for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl BitXor<bool> for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl BitXor<Simd<[msize; 2]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl BitXor<isize> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl BitXor<Simd<[isize; 4]>> for isize","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl BitXor<usize> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl BitXor<Simd<[usize; 4]>> for usize","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[msize; 4]>> for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl BitXor<bool> for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl BitXor<Simd<[msize; 4]>> for bool","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl BitXor<isize> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl BitXor<Simd<[isize; 8]>> for isize","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl BitXor<usize> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl BitXor<Simd<[usize; 8]>> for 
usize","synthetic":false,"types":[]},{"text":"impl BitXor<Simd<[msize; 8]>> for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]},{"text":"impl BitXor<bool> for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]},{"text":"impl BitXor<Simd<[msize; 8]>> for bool","synthetic":false,"types":[]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/bit/trait.BitXorAssign.js b/implementors/core/ops/bit/trait.BitXorAssign.js new file mode 100644 index 000000000..6ebdfff7d --- /dev/null +++ b/implementors/core/ops/bit/trait.BitXorAssign.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl BitXorAssign<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl BitXorAssign<i8> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl BitXorAssign<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl BitXorAssign<u8> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl BitXorAssign<Simd<[m8; 2]>> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl BitXorAssign<bool> for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl BitXorAssign<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl BitXorAssign<i8> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl BitXorAssign<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl BitXorAssign<u8> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl BitXorAssign<Simd<[m8; 4]>> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl BitXorAssign<bool> for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl BitXorAssign<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl BitXorAssign<i16> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl BitXorAssign<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl BitXorAssign<u16> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl BitXorAssign<Simd<[m16; 2]>> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl BitXorAssign<bool> for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl BitXorAssign<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl BitXorAssign<i8> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl BitXorAssign<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl BitXorAssign<u8> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl BitXorAssign<Simd<[m8; 8]>> for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl BitXorAssign<bool> for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl BitXorAssign<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl BitXorAssign<i16> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl BitXorAssign<Simd<[u16; 4]>> for 
u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl BitXorAssign<u16> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl BitXorAssign<Simd<[m16; 4]>> for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl BitXorAssign<bool> for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl BitXorAssign<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl BitXorAssign<i32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl BitXorAssign<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl BitXorAssign<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl BitXorAssign<Simd<[m32; 2]>> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl BitXorAssign<bool> for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl BitXorAssign<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl BitXorAssign<i8> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl BitXorAssign<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl BitXorAssign<u8> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl BitXorAssign<Simd<[m8; 16]>> for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl BitXorAssign<bool> for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl BitXorAssign<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl BitXorAssign<i16> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl BitXorAssign<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl BitXorAssign<u16> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl BitXorAssign<Simd<[m16; 8]>> for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl BitXorAssign<bool> for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl BitXorAssign<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl BitXorAssign<i32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl BitXorAssign<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl BitXorAssign<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl BitXorAssign<Simd<[m32; 4]>> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl BitXorAssign<bool> for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl BitXorAssign<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl BitXorAssign<i64> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl BitXorAssign<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl BitXorAssign<u64> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl BitXorAssign<Simd<[m64; 2]>> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl BitXorAssign<bool> for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl BitXorAssign<Simd<[i128; 1]>> for 
i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl BitXorAssign<i128> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl BitXorAssign<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl BitXorAssign<u128> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl BitXorAssign<Simd<[m128; 1]>> for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl BitXorAssign<bool> for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl BitXorAssign<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl BitXorAssign<i8> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl BitXorAssign<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl BitXorAssign<u8> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl BitXorAssign<Simd<[m8; 32]>> for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl BitXorAssign<bool> for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl BitXorAssign<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl BitXorAssign<i16> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl BitXorAssign<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl BitXorAssign<u16> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl BitXorAssign<Simd<[m16; 16]>> for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl BitXorAssign<bool> for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl BitXorAssign<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl BitXorAssign<i32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl BitXorAssign<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl BitXorAssign<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl BitXorAssign<Simd<[m32; 8]>> for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl BitXorAssign<bool> for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl BitXorAssign<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl BitXorAssign<i64> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl BitXorAssign<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl BitXorAssign<u64> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl BitXorAssign<Simd<[m64; 4]>> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl BitXorAssign<bool> for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl BitXorAssign<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl BitXorAssign<i128> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl BitXorAssign<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl BitXorAssign<u128> for 
u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl BitXorAssign<Simd<[m128; 2]>> for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl BitXorAssign<bool> for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl BitXorAssign<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl BitXorAssign<i8> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl BitXorAssign<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl BitXorAssign<u8> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl BitXorAssign<Simd<[m8; 64]>> for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl BitXorAssign<bool> for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl BitXorAssign<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl BitXorAssign<i16> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl BitXorAssign<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl BitXorAssign<u16> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl BitXorAssign<Simd<[m16; 32]>> for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl BitXorAssign<bool> for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl BitXorAssign<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl BitXorAssign<i32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl BitXorAssign<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl BitXorAssign<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl BitXorAssign<Simd<[m32; 16]>> for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl BitXorAssign<bool> for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl BitXorAssign<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl BitXorAssign<i64> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl BitXorAssign<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl BitXorAssign<u64> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl BitXorAssign<Simd<[m64; 8]>> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl BitXorAssign<bool> for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl BitXorAssign<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl BitXorAssign<i128> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl BitXorAssign<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl BitXorAssign<u128> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl BitXorAssign<Simd<[m128; 4]>> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl BitXorAssign<bool> for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl BitXorAssign<Simd<[isize; 2]>> for 
isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl BitXorAssign<isize> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl BitXorAssign<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl BitXorAssign<usize> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl BitXorAssign<Simd<[msize; 2]>> for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl BitXorAssign<bool> for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl BitXorAssign<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl BitXorAssign<isize> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl BitXorAssign<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl BitXorAssign<usize> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl BitXorAssign<Simd<[msize; 4]>> for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl BitXorAssign<bool> for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl BitXorAssign<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl BitXorAssign<isize> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl BitXorAssign<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl BitXorAssign<usize> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl BitXorAssign<Simd<[msize; 8]>> for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]},{"text":"impl BitXorAssign<bool> for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/bit/trait.Not.js b/implementors/core/ops/bit/trait.Not.js new file mode 100644 index 000000000..2d27be379 --- /dev/null +++ b/implementors/core/ops/bit/trait.Not.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Not for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Not for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Not for m8x2","synthetic":false,"types":["packed_simd::v16::m8x2"]},{"text":"impl Not for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Not for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Not for m8x4","synthetic":false,"types":["packed_simd::v32::m8x4"]},{"text":"impl Not for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Not for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Not for m16x2","synthetic":false,"types":["packed_simd::v32::m16x2"]},{"text":"impl Not for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Not for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Not for m8x8","synthetic":false,"types":["packed_simd::v64::m8x8"]},{"text":"impl Not for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Not for 
u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Not for m16x4","synthetic":false,"types":["packed_simd::v64::m16x4"]},{"text":"impl Not for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Not for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Not for m32x2","synthetic":false,"types":["packed_simd::v64::m32x2"]},{"text":"impl Not for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Not for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Not for m8x16","synthetic":false,"types":["packed_simd::v128::m8x16"]},{"text":"impl Not for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Not for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Not for m16x8","synthetic":false,"types":["packed_simd::v128::m16x8"]},{"text":"impl Not for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Not for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Not for m32x4","synthetic":false,"types":["packed_simd::v128::m32x4"]},{"text":"impl Not for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Not for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Not for m64x2","synthetic":false,"types":["packed_simd::v128::m64x2"]},{"text":"impl Not for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Not for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Not for m128x1","synthetic":false,"types":["packed_simd::v128::m128x1"]},{"text":"impl Not for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Not for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Not for m8x32","synthetic":false,"types":["packed_simd::v256::m8x32"]},{"text":"impl Not for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Not for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Not for m16x16","synthetic":false,"types":["packed_simd::v256::m16x16"]},{"text":"impl Not for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Not for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Not for m32x8","synthetic":false,"types":["packed_simd::v256::m32x8"]},{"text":"impl Not for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Not for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Not for m64x4","synthetic":false,"types":["packed_simd::v256::m64x4"]},{"text":"impl Not for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Not for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Not for m128x2","synthetic":false,"types":["packed_simd::v256::m128x2"]},{"text":"impl Not for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Not for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Not for m8x64","synthetic":false,"types":["packed_simd::v512::m8x64"]},{"text":"impl Not for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Not for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Not for m16x32","synthetic":false,"types":["packed_simd::v512::m16x32"]},{"text":"impl Not for 
i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Not for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Not for m32x16","synthetic":false,"types":["packed_simd::v512::m32x16"]},{"text":"impl Not for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Not for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Not for m64x8","synthetic":false,"types":["packed_simd::v512::m64x8"]},{"text":"impl Not for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Not for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Not for m128x4","synthetic":false,"types":["packed_simd::v512::m128x4"]},{"text":"impl Not for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Not for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Not for msizex2","synthetic":false,"types":["packed_simd::vSize::msizex2"]},{"text":"impl Not for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Not for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Not for msizex4","synthetic":false,"types":["packed_simd::vSize::msizex4"]},{"text":"impl Not for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Not for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl Not for msizex8","synthetic":false,"types":["packed_simd::vSize::msizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/bit/trait.Shl.js b/implementors/core/ops/bit/trait.Shl.js new file mode 100644 index 000000000..f1b9c7363 --- /dev/null +++ b/implementors/core/ops/bit/trait.Shl.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Shl<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Shl<u32> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Shl<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Shl<u32> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Shl<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Shl<u32> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Shl<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Shl<u32> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Shl<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Shl<u32> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Shl<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Shl<u32> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Shl<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Shl<u32> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Shl<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Shl<u32> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Shl<Simd<[i16; 4]>> for 
i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Shl<u32> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Shl<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Shl<u32> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Shl<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Shl<u32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Shl<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Shl<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Shl<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Shl<u32> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Shl<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Shl<u32> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Shl<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Shl<u32> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Shl<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Shl<u32> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Shl<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Shl<u32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Shl<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Shl<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Shl<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Shl<u32> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Shl<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Shl<u32> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Shl<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Shl<u32> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Shl<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Shl<u32> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Shl<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Shl<u32> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Shl<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Shl<u32> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Shl<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Shl<u32> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Shl<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Shl<u32> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Shl<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Shl<u32> for 
i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Shl<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Shl<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Shl<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Shl<u32> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Shl<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Shl<u32> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Shl<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Shl<u32> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Shl<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Shl<u32> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Shl<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Shl<u32> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Shl<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Shl<u32> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Shl<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Shl<u32> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Shl<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Shl<u32> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Shl<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Shl<u32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Shl<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Shl<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Shl<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Shl<u32> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Shl<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Shl<u32> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Shl<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Shl<u32> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Shl<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Shl<u32> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Shl<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Shl<u32> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Shl<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Shl<u32> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Shl<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Shl<u32> for 
isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Shl<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Shl<u32> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Shl<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Shl<u32> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Shl<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl Shl<u32> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/bit/trait.ShlAssign.js b/implementors/core/ops/bit/trait.ShlAssign.js new file mode 100644 index 000000000..a36b64f98 --- /dev/null +++ b/implementors/core/ops/bit/trait.ShlAssign.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl ShlAssign<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl ShlAssign<u32> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl ShlAssign<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl ShlAssign<u32> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl ShlAssign<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl ShlAssign<u32> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl ShlAssign<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl ShlAssign<u32> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl ShlAssign<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl ShlAssign<u32> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl ShlAssign<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl ShlAssign<u32> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl ShlAssign<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl ShlAssign<u32> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl ShlAssign<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl ShlAssign<u32> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl ShlAssign<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl ShlAssign<u32> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl ShlAssign<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl ShlAssign<u32> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl ShlAssign<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl ShlAssign<u32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl ShlAssign<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl ShlAssign<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl 
ShlAssign<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl ShlAssign<u32> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl ShlAssign<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl ShlAssign<u32> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl ShlAssign<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl ShlAssign<u32> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl ShlAssign<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl ShlAssign<u32> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl ShlAssign<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl ShlAssign<u32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl ShlAssign<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl ShlAssign<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl ShlAssign<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl ShlAssign<u32> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl ShlAssign<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl ShlAssign<u32> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl ShlAssign<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl ShlAssign<u32> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl ShlAssign<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl ShlAssign<u32> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl ShlAssign<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl ShlAssign<u32> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl ShlAssign<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl ShlAssign<u32> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl ShlAssign<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl ShlAssign<u32> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl ShlAssign<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl ShlAssign<u32> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl ShlAssign<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl ShlAssign<u32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl ShlAssign<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl ShlAssign<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl ShlAssign<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl ShlAssign<u32> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl ShlAssign<Simd<[u64; 4]>> for 
u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl ShlAssign<u32> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl ShlAssign<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl ShlAssign<u32> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl ShlAssign<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl ShlAssign<u32> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl ShlAssign<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl ShlAssign<u32> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl ShlAssign<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl ShlAssign<u32> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl ShlAssign<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl ShlAssign<u32> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl ShlAssign<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl ShlAssign<u32> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl ShlAssign<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl ShlAssign<u32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl ShlAssign<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl ShlAssign<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl ShlAssign<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl ShlAssign<u32> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl ShlAssign<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl ShlAssign<u32> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl ShlAssign<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl ShlAssign<u32> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl ShlAssign<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl ShlAssign<u32> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl ShlAssign<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl ShlAssign<u32> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl ShlAssign<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl ShlAssign<u32> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl ShlAssign<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl ShlAssign<u32> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl ShlAssign<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl ShlAssign<u32> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl ShlAssign<Simd<[isize; 8]>> for 
isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl ShlAssign<u32> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl ShlAssign<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl ShlAssign<u32> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/bit/trait.Shr.js b/implementors/core/ops/bit/trait.Shr.js new file mode 100644 index 000000000..b51faf2fb --- /dev/null +++ b/implementors/core/ops/bit/trait.Shr.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl Shr<Simd<[i8; 2]>> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Shr<u32> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl Shr<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Shr<u32> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl Shr<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Shr<u32> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl Shr<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Shr<u32> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl Shr<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Shr<u32> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl Shr<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Shr<u32> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl Shr<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Shr<u32> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl Shr<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Shr<u32> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl Shr<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Shr<u32> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl Shr<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Shr<u32> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl Shr<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Shr<u32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl Shr<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Shr<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl Shr<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Shr<u32> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl Shr<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Shr<u32> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl Shr<Simd<[i16; 8]>> for 
i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Shr<u32> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl Shr<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Shr<u32> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl Shr<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Shr<u32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl Shr<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Shr<u32> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl Shr<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Shr<u32> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl Shr<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Shr<u32> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl Shr<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Shr<u32> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl Shr<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Shr<u32> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl Shr<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Shr<u32> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl Shr<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Shr<u32> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl Shr<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Shr<u32> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl Shr<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Shr<u32> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl Shr<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Shr<u32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl Shr<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Shr<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl Shr<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Shr<u32> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl Shr<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Shr<u32> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl Shr<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Shr<u32> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl Shr<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Shr<u32> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl Shr<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Shr<u32> 
for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl Shr<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Shr<u32> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl Shr<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Shr<u32> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl Shr<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Shr<u32> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl Shr<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Shr<u32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl Shr<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Shr<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl Shr<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Shr<u32> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl Shr<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Shr<u32> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl Shr<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Shr<u32> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl Shr<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Shr<u32> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl Shr<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Shr<u32> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl Shr<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Shr<u32> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl Shr<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Shr<u32> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl Shr<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Shr<u32> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl Shr<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Shr<u32> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl Shr<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl Shr<u32> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/core/ops/bit/trait.ShrAssign.js b/implementors/core/ops/bit/trait.ShrAssign.js new file mode 100644 index 000000000..acc6d5d58 --- /dev/null +++ b/implementors/core/ops/bit/trait.ShrAssign.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = [{"text":"impl ShrAssign<Simd<[i8; 2]>> for 
i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl ShrAssign<u32> for i8x2","synthetic":false,"types":["packed_simd::v16::i8x2"]},{"text":"impl ShrAssign<Simd<[u8; 2]>> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl ShrAssign<u32> for u8x2","synthetic":false,"types":["packed_simd::v16::u8x2"]},{"text":"impl ShrAssign<Simd<[i8; 4]>> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl ShrAssign<u32> for i8x4","synthetic":false,"types":["packed_simd::v32::i8x4"]},{"text":"impl ShrAssign<Simd<[u8; 4]>> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl ShrAssign<u32> for u8x4","synthetic":false,"types":["packed_simd::v32::u8x4"]},{"text":"impl ShrAssign<Simd<[i16; 2]>> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl ShrAssign<u32> for i16x2","synthetic":false,"types":["packed_simd::v32::i16x2"]},{"text":"impl ShrAssign<Simd<[u16; 2]>> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl ShrAssign<u32> for u16x2","synthetic":false,"types":["packed_simd::v32::u16x2"]},{"text":"impl ShrAssign<Simd<[i8; 8]>> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl ShrAssign<u32> for i8x8","synthetic":false,"types":["packed_simd::v64::i8x8"]},{"text":"impl ShrAssign<Simd<[u8; 8]>> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl ShrAssign<u32> for u8x8","synthetic":false,"types":["packed_simd::v64::u8x8"]},{"text":"impl ShrAssign<Simd<[i16; 4]>> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl ShrAssign<u32> for i16x4","synthetic":false,"types":["packed_simd::v64::i16x4"]},{"text":"impl ShrAssign<Simd<[u16; 4]>> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl ShrAssign<u32> for u16x4","synthetic":false,"types":["packed_simd::v64::u16x4"]},{"text":"impl ShrAssign<Simd<[i32; 2]>> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl ShrAssign<u32> for i32x2","synthetic":false,"types":["packed_simd::v64::i32x2"]},{"text":"impl ShrAssign<Simd<[u32; 2]>> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl ShrAssign<u32> for u32x2","synthetic":false,"types":["packed_simd::v64::u32x2"]},{"text":"impl ShrAssign<Simd<[i8; 16]>> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl ShrAssign<u32> for i8x16","synthetic":false,"types":["packed_simd::v128::i8x16"]},{"text":"impl ShrAssign<Simd<[u8; 16]>> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl ShrAssign<u32> for u8x16","synthetic":false,"types":["packed_simd::v128::u8x16"]},{"text":"impl ShrAssign<Simd<[i16; 8]>> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl ShrAssign<u32> for i16x8","synthetic":false,"types":["packed_simd::v128::i16x8"]},{"text":"impl ShrAssign<Simd<[u16; 8]>> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl ShrAssign<u32> for u16x8","synthetic":false,"types":["packed_simd::v128::u16x8"]},{"text":"impl ShrAssign<Simd<[i32; 4]>> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl ShrAssign<u32> for i32x4","synthetic":false,"types":["packed_simd::v128::i32x4"]},{"text":"impl ShrAssign<Simd<[u32; 4]>> for u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl ShrAssign<u32> for 
u32x4","synthetic":false,"types":["packed_simd::v128::u32x4"]},{"text":"impl ShrAssign<Simd<[i64; 2]>> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl ShrAssign<u32> for i64x2","synthetic":false,"types":["packed_simd::v128::i64x2"]},{"text":"impl ShrAssign<Simd<[u64; 2]>> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl ShrAssign<u32> for u64x2","synthetic":false,"types":["packed_simd::v128::u64x2"]},{"text":"impl ShrAssign<Simd<[i128; 1]>> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl ShrAssign<u32> for i128x1","synthetic":false,"types":["packed_simd::v128::i128x1"]},{"text":"impl ShrAssign<Simd<[u128; 1]>> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl ShrAssign<u32> for u128x1","synthetic":false,"types":["packed_simd::v128::u128x1"]},{"text":"impl ShrAssign<Simd<[i8; 32]>> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl ShrAssign<u32> for i8x32","synthetic":false,"types":["packed_simd::v256::i8x32"]},{"text":"impl ShrAssign<Simd<[u8; 32]>> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl ShrAssign<u32> for u8x32","synthetic":false,"types":["packed_simd::v256::u8x32"]},{"text":"impl ShrAssign<Simd<[i16; 16]>> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl ShrAssign<u32> for i16x16","synthetic":false,"types":["packed_simd::v256::i16x16"]},{"text":"impl ShrAssign<Simd<[u16; 16]>> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl ShrAssign<u32> for u16x16","synthetic":false,"types":["packed_simd::v256::u16x16"]},{"text":"impl ShrAssign<Simd<[i32; 8]>> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl ShrAssign<u32> for i32x8","synthetic":false,"types":["packed_simd::v256::i32x8"]},{"text":"impl ShrAssign<Simd<[u32; 8]>> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl ShrAssign<u32> for u32x8","synthetic":false,"types":["packed_simd::v256::u32x8"]},{"text":"impl ShrAssign<Simd<[i64; 4]>> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl ShrAssign<u32> for i64x4","synthetic":false,"types":["packed_simd::v256::i64x4"]},{"text":"impl ShrAssign<Simd<[u64; 4]>> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl ShrAssign<u32> for u64x4","synthetic":false,"types":["packed_simd::v256::u64x4"]},{"text":"impl ShrAssign<Simd<[i128; 2]>> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl ShrAssign<u32> for i128x2","synthetic":false,"types":["packed_simd::v256::i128x2"]},{"text":"impl ShrAssign<Simd<[u128; 2]>> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl ShrAssign<u32> for u128x2","synthetic":false,"types":["packed_simd::v256::u128x2"]},{"text":"impl ShrAssign<Simd<[i8; 64]>> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl ShrAssign<u32> for i8x64","synthetic":false,"types":["packed_simd::v512::i8x64"]},{"text":"impl ShrAssign<Simd<[u8; 64]>> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl ShrAssign<u32> for u8x64","synthetic":false,"types":["packed_simd::v512::u8x64"]},{"text":"impl ShrAssign<Simd<[i16; 32]>> for i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl ShrAssign<u32> for 
i16x32","synthetic":false,"types":["packed_simd::v512::i16x32"]},{"text":"impl ShrAssign<Simd<[u16; 32]>> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl ShrAssign<u32> for u16x32","synthetic":false,"types":["packed_simd::v512::u16x32"]},{"text":"impl ShrAssign<Simd<[i32; 16]>> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl ShrAssign<u32> for i32x16","synthetic":false,"types":["packed_simd::v512::i32x16"]},{"text":"impl ShrAssign<Simd<[u32; 16]>> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl ShrAssign<u32> for u32x16","synthetic":false,"types":["packed_simd::v512::u32x16"]},{"text":"impl ShrAssign<Simd<[i64; 8]>> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl ShrAssign<u32> for i64x8","synthetic":false,"types":["packed_simd::v512::i64x8"]},{"text":"impl ShrAssign<Simd<[u64; 8]>> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl ShrAssign<u32> for u64x8","synthetic":false,"types":["packed_simd::v512::u64x8"]},{"text":"impl ShrAssign<Simd<[i128; 4]>> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl ShrAssign<u32> for i128x4","synthetic":false,"types":["packed_simd::v512::i128x4"]},{"text":"impl ShrAssign<Simd<[u128; 4]>> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl ShrAssign<u32> for u128x4","synthetic":false,"types":["packed_simd::v512::u128x4"]},{"text":"impl ShrAssign<Simd<[isize; 2]>> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl ShrAssign<u32> for isizex2","synthetic":false,"types":["packed_simd::vSize::isizex2"]},{"text":"impl ShrAssign<Simd<[usize; 2]>> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl ShrAssign<u32> for usizex2","synthetic":false,"types":["packed_simd::vSize::usizex2"]},{"text":"impl ShrAssign<Simd<[isize; 4]>> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl ShrAssign<u32> for isizex4","synthetic":false,"types":["packed_simd::vSize::isizex4"]},{"text":"impl ShrAssign<Simd<[usize; 4]>> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl ShrAssign<u32> for usizex4","synthetic":false,"types":["packed_simd::vSize::usizex4"]},{"text":"impl ShrAssign<Simd<[isize; 8]>> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl ShrAssign<u32> for isizex8","synthetic":false,"types":["packed_simd::vSize::isizex8"]},{"text":"impl ShrAssign<Simd<[usize; 8]>> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]},{"text":"impl ShrAssign<u32> for usizex8","synthetic":false,"types":["packed_simd::vSize::usizex8"]}]; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/libm/trait.F32Ext.js b/implementors/libm/trait.F32Ext.js new file mode 100644 index 000000000..849ca5a74 --- /dev/null +++ b/implementors/libm/trait.F32Ext.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["libm"] = []; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/libm/trait.F64Ext.js b/implementors/libm/trait.F64Ext.js new file mode 100644 index 000000000..849ca5a74 --- /dev/null +++ 
b/implementors/libm/trait.F64Ext.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["libm"] = []; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/packed_simd/trait.Cast.js b/implementors/packed_simd/trait.Cast.js new file mode 100644 index 000000000..668d57c9a --- /dev/null +++ b/implementors/packed_simd/trait.Cast.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = []; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/packed_simd/trait.FromBits.js b/implementors/packed_simd/trait.FromBits.js new file mode 100644 index 000000000..668d57c9a --- /dev/null +++ b/implementors/packed_simd/trait.FromBits.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = []; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/packed_simd/trait.FromCast.js b/implementors/packed_simd/trait.FromCast.js new file mode 100644 index 000000000..668d57c9a --- /dev/null +++ b/implementors/packed_simd/trait.FromCast.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = []; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/packed_simd/trait.IntoBits.js b/implementors/packed_simd/trait.IntoBits.js new file mode 100644 index 000000000..668d57c9a --- /dev/null +++ b/implementors/packed_simd/trait.IntoBits.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = []; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/packed_simd/trait.Mask.js b/implementors/packed_simd/trait.Mask.js new file mode 100644 index 000000000..668d57c9a --- /dev/null +++ b/implementors/packed_simd/trait.Mask.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = []; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/packed_simd/trait.SimdArray.js b/implementors/packed_simd/trait.SimdArray.js new file mode 100644 index 000000000..668d57c9a --- /dev/null +++ b/implementors/packed_simd/trait.SimdArray.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = []; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/implementors/packed_simd/trait.SimdVector.js b/implementors/packed_simd/trait.SimdVector.js new file mode 100644 index 000000000..668d57c9a --- /dev/null +++ b/implementors/packed_simd/trait.SimdVector.js @@ -0,0 +1,3 @@ +(function() {var implementors = {}; +implementors["packed_simd"] = []; +if (window.register_implementors) {window.register_implementors(implementors);} else {window.pending_implementors = implementors;}})() \ No newline at end of file diff --git a/libm/all.html 
b/libm/all.html new file mode 100644 index 000000000..dbf2c70d3 --- /dev/null +++ b/libm/all.html @@ -0,0 +1,3 @@ +List of all items in this crate

List of all items

Traits

Functions

\ No newline at end of file diff --git a/libm/fn.acos.html b/libm/fn.acos.html new file mode 100644 index 000000000..65ae631d4 --- /dev/null +++ b/libm/fn.acos.html @@ -0,0 +1,5 @@ +libm::acos - Rust

Function libm::acos

pub fn acos(x: f64) -> f64

Arccosine (f64)

Computes the inverse cosine (arc cosine) of the input value. Arguments must be in the range -1 to 1. Returns values in radians, in the range of 0 to pi.
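For illustration, a minimal sketch of calling this function from Rust; it assumes the libm crate is declared as a dependency (the version below is an assumption):

    fn main() {
        // Cargo.toml: libm = "0.2" (assumed version)
        // acos(0.5) is pi/3; the result is in radians.
        let x = libm::acos(0.5);
        assert!((x - std::f64::consts::FRAC_PI_3).abs() < 1e-12);
    }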
\ No newline at end of file diff --git a/libm/fn.acosf.html b/libm/fn.acosf.html new file mode 100644 index 000000000..1ff609c15 --- /dev/null +++ b/libm/fn.acosf.html @@ -0,0 +1,5 @@ +libm::acosf - Rust

Function libm::acosf

pub fn acosf(x: f32) -> f32

Arccosine (f32)

Computes the inverse cosine (arc cosine) of the input value. Arguments must be in the range -1 to 1. Returns values in radians, in the range of 0 to pi.
\ No newline at end of file diff --git a/libm/fn.acosh.html b/libm/fn.acosh.html new file mode 100644 index 000000000..d6e47fa9e --- /dev/null +++ b/libm/fn.acosh.html @@ -0,0 +1,5 @@ +libm::acosh - Rust

Function libm::acosh

pub fn acosh(x: f64) -> f64

Inverse hyperbolic cosine (f64)

Calculates the inverse hyperbolic cosine of x. Is defined as log(x + sqrt(x*x-1)). x must be a number greater than or equal to 1.
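A small sketch checking the stated identity (again assuming libm as a dependency):

    fn main() {
        let x = 2.0_f64;
        let lhs = libm::acosh(x);
        // The page defines acosh(x) as log(x + sqrt(x*x - 1)); libm::log is the natural log.
        let rhs = libm::log(x + libm::sqrt(x * x - 1.0));
        assert!((lhs - rhs).abs() < 1e-12);
    }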
\ No newline at end of file diff --git a/libm/fn.acoshf.html b/libm/fn.acoshf.html new file mode 100644 index 000000000..e02c197b1 --- /dev/null +++ b/libm/fn.acoshf.html @@ -0,0 +1,5 @@ +libm::acoshf - Rust

Function libm::acoshf

pub fn acoshf(x: f32) -> f32

Inverse hyperbolic cosine (f32)

Calculates the inverse hyperbolic cosine of x. Is defined as log(x + sqrt(x*x-1)). x must be a number greater than or equal to 1.
\ No newline at end of file diff --git a/libm/fn.asin.html b/libm/fn.asin.html new file mode 100644 index 000000000..614b4137f --- /dev/null +++ b/libm/fn.asin.html @@ -0,0 +1,5 @@ +libm::asin - Rust

Function libm::asin

pub fn asin(x: f64) -> f64

Arcsine (f64)

Computes the inverse sine (arc sine) of the argument x. Arguments to asin must be in the range -1 to 1. Returns values in radians, in the range of -pi/2 to pi/2.
\ No newline at end of file diff --git a/libm/fn.asinf.html b/libm/fn.asinf.html new file mode 100644 index 000000000..355d680c6 --- /dev/null +++ b/libm/fn.asinf.html @@ -0,0 +1,5 @@ +libm::asinf - Rust

Function libm::asinf

pub fn asinf(x: f32) -> f32

Arcsine (f32)

Computes the inverse sine (arc sine) of the argument x. Arguments to asin must be in the range -1 to 1. Returns values in radians, in the range of -pi/2 to pi/2.
\ No newline at end of file diff --git a/libm/fn.asinh.html b/libm/fn.asinh.html new file mode 100644 index 000000000..7eca6ed7b --- /dev/null +++ b/libm/fn.asinh.html @@ -0,0 +1,4 @@ +libm::asinh - Rust

Function libm::asinh

pub fn asinh(x: f64) -> f64

Inverse hyperbolic sine (f64)

Calculates the inverse hyperbolic sine of x. Is defined as sgn(x)*log(|x|+sqrt(x*x+1)).
\ No newline at end of file diff --git a/libm/fn.asinhf.html b/libm/fn.asinhf.html new file mode 100644 index 000000000..d42dcfd15 --- /dev/null +++ b/libm/fn.asinhf.html @@ -0,0 +1,4 @@ +libm::asinhf - Rust

Function libm::asinhf

pub fn asinhf(x: f32) -> f32

Inverse hyperbolic sine (f32)

Calculates the inverse hyperbolic sine of x. Is defined as sgn(x)*log(|x|+sqrt(x*x+1)).
\ No newline at end of file diff --git a/libm/fn.atan.html b/libm/fn.atan.html new file mode 100644 index 000000000..4f702c26c --- /dev/null +++ b/libm/fn.atan.html @@ -0,0 +1,4 @@ +libm::atan - Rust

Function libm::atan

pub fn atan(x: f64) -> f64

Arctangent (f64)

Computes the inverse tangent (arc tangent) of the input value. Returns a value in radians, in the range of -pi/2 to pi/2.
\ No newline at end of file diff --git a/libm/fn.atan2.html b/libm/fn.atan2.html new file mode 100644 index 000000000..b4d745994 --- /dev/null +++ b/libm/fn.atan2.html @@ -0,0 +1,5 @@ +libm::atan2 - Rust

Function libm::atan2

pub fn atan2(y: f64, x: f64) -> f64

Arctangent of y/x (f64)

Computes the inverse tangent (arc tangent) of y/x. Produces the correct result even for angles near pi/2 or -pi/2 (that is, when x is near 0). Returns a value in radians, in the range of -pi to pi.
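A brief sketch of the quadrant-aware behavior described above (libm assumed as a dependency):

    fn main() {
        use std::f64::consts::{FRAC_PI_2, FRAC_PI_4};
        // Correct even when x is 0: the result is pi/2, not a division error.
        assert!((libm::atan2(1.0, 0.0) - FRAC_PI_2).abs() < 1e-12);
        // The signs of both arguments select the quadrant: (-1, -1) lies at -3*pi/4.
        assert!((libm::atan2(-1.0, -1.0) + 3.0 * FRAC_PI_4).abs() < 1e-12);
    }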
\ No newline at end of file diff --git a/libm/fn.atan2f.html b/libm/fn.atan2f.html new file mode 100644 index 000000000..84455f335 --- /dev/null +++ b/libm/fn.atan2f.html @@ -0,0 +1,5 @@ +libm::atan2f - Rust

Function libm::atan2f

pub fn atan2f(y: f32, x: f32) -> f32

Arctangent of y/x (f32)

Computes the inverse tangent (arc tangent) of y/x. Produces the correct result even for angles near pi/2 or -pi/2 (that is, when x is near 0). Returns a value in radians, in the range of -pi to pi.
\ No newline at end of file diff --git a/libm/fn.atanf.html b/libm/fn.atanf.html new file mode 100644 index 000000000..698713918 --- /dev/null +++ b/libm/fn.atanf.html @@ -0,0 +1,4 @@ +libm::atanf - Rust

Function libm::atanf

pub fn atanf(x: f32) -> f32

Arctangent (f32)

Computes the inverse tangent (arc tangent) of the input value. Returns a value in radians, in the range of -pi/2 to pi/2.
\ No newline at end of file diff --git a/libm/fn.atanh.html b/libm/fn.atanh.html new file mode 100644 index 000000000..fe061b570 --- /dev/null +++ b/libm/fn.atanh.html @@ -0,0 +1,4 @@ +libm::atanh - Rust

Function libm::atanh

pub fn atanh(x: f64) -> f64

Inverse hyperbolic tangent (f64)

Calculates the inverse hyperbolic tangent of x. Is defined as log((1+x)/(1-x))/2 = log1p(2x/(1-x))/2.
\ No newline at end of file diff --git a/libm/fn.atanhf.html b/libm/fn.atanhf.html new file mode 100644 index 000000000..c9008fc03 --- /dev/null +++ b/libm/fn.atanhf.html @@ -0,0 +1,4 @@ +libm::atanhf - Rust

Function libm::atanhf

pub fn atanhf(x: f32) -> f32

Inverse hyperbolic tangent (f32)

Calculates the inverse hyperbolic tangent of x. Is defined as log((1+x)/(1-x))/2 = log1p(2x/(1-x))/2.
\ No newline at end of file diff --git a/libm/fn.cbrt.html b/libm/fn.cbrt.html new file mode 100644 index 000000000..d60f6cc02 --- /dev/null +++ b/libm/fn.cbrt.html @@ -0,0 +1,2 @@ +libm::cbrt - Rust

Function libm::cbrt

pub fn cbrt(x: f64) -> f64

Computes the cube root of the argument.
\ No newline at end of file diff --git a/libm/fn.cbrtf.html b/libm/fn.cbrtf.html new file mode 100644 index 000000000..d5a6f7c8a --- /dev/null +++ b/libm/fn.cbrtf.html @@ -0,0 +1,3 @@ +libm::cbrtf - Rust

Function libm::cbrtf

pub fn cbrtf(x: f32) -> f32

Cube root (f32)

Computes the cube root of the argument.
\ No newline at end of file diff --git a/libm/fn.ceil.html b/libm/fn.ceil.html new file mode 100644 index 000000000..ff1cba5c1 --- /dev/null +++ b/libm/fn.ceil.html @@ -0,0 +1,3 @@ +libm::ceil - Rust

Function libm::ceil

pub fn ceil(x: f64) -> f64

Ceil (f64)

Finds the nearest integer greater than or equal to x.
\ No newline at end of file diff --git a/libm/fn.ceilf.html b/libm/fn.ceilf.html new file mode 100644 index 000000000..d2d5d023c --- /dev/null +++ b/libm/fn.ceilf.html @@ -0,0 +1,3 @@ +libm::ceilf - Rust

Function libm::ceilf

pub fn ceilf(x: f32) -> f32

Ceil (f32)

Finds the nearest integer greater than or equal to x.
\ No newline at end of file diff --git a/libm/fn.copysign.html b/libm/fn.copysign.html new file mode 100644 index 000000000..88c05dc5c --- /dev/null +++ b/libm/fn.copysign.html @@ -0,0 +1,4 @@ +libm::copysign - Rust

Function libm::copysign

pub fn copysign(x: f64, y: f64) -> f64

Sign of Y, magnitude of X (f64)

Constructs a number with the magnitude (absolute value) of its first argument, x, and the sign of its second argument, y.
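For instance (a sketch, assuming libm as a dependency):

    fn main() {
        // Magnitude of the first argument, sign of the second.
        assert_eq!(libm::copysign(3.0, -1.0), -3.0);
        // The sign of negative zero counts as negative.
        assert_eq!(libm::copysign(3.0, -0.0), -3.0);
    }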
\ No newline at end of file diff --git a/libm/fn.copysignf.html b/libm/fn.copysignf.html new file mode 100644 index 000000000..398c3b0ef --- /dev/null +++ b/libm/fn.copysignf.html @@ -0,0 +1,4 @@ +libm::copysignf - Rust

Function libm::copysignf

pub fn copysignf(x: f32, y: f32) -> f32

Sign of Y, magnitude of X (f32)

Constructs a number with the magnitude (absolute value) of its first argument, x, and the sign of its second argument, y.
\ No newline at end of file diff --git a/libm/fn.cos.html b/libm/fn.cos.html new file mode 100644 index 000000000..64524c592 --- /dev/null +++ b/libm/fn.cos.html @@ -0,0 +1 @@ +libm::cos - Rust

Function libm::cos

pub fn cos(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.cosf.html b/libm/fn.cosf.html new file mode 100644 index 000000000..21344d468 --- /dev/null +++ b/libm/fn.cosf.html @@ -0,0 +1 @@ +libm::cosf - Rust

Function libm::cosf

pub fn cosf(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.cosh.html b/libm/fn.cosh.html new file mode 100644 index 000000000..e74c673a7 --- /dev/null +++ b/libm/fn.cosh.html @@ -0,0 +1,5 @@ +libm::cosh - Rust

Function libm::cosh

pub fn cosh(x: f64) -> f64

Hyperbolic cosine (f64)

Computes the hyperbolic cosine of the argument x. Is defined as (exp(x) + exp(-x))/2. Angles are specified in radians.
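A sketch checking the (exp(x) + exp(-x))/2 definition (libm assumed as a dependency):

    fn main() {
        let x = 1.0_f64;
        let direct = libm::cosh(x);
        let from_def = (libm::exp(x) + libm::exp(-x)) / 2.0;
        assert!((direct - from_def).abs() < 1e-12);
    }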
\ No newline at end of file diff --git a/libm/fn.coshf.html b/libm/fn.coshf.html new file mode 100644 index 000000000..3ce06b7e4 --- /dev/null +++ b/libm/fn.coshf.html @@ -0,0 +1,5 @@ +libm::coshf - Rust

Function libm::coshf

pub fn coshf(x: f32) -> f32

Hyperbolic cosine (f32)

Computes the hyperbolic cosine of the argument x. Is defined as (exp(x) + exp(-x))/2. Angles are specified in radians.
\ No newline at end of file diff --git a/libm/fn.erf.html b/libm/fn.erf.html new file mode 100644 index 000000000..5dab19582 --- /dev/null +++ b/libm/fn.erf.html @@ -0,0 +1,5 @@ +libm::erf - Rust

Function libm::erf

pub fn erf(x: f64) -> f64

Error function (f64)

Calculates an approximation to the “error function”, which estimates the probability that an observation will fall within x standard deviations of the mean (assuming a normal distribution).
\ No newline at end of file diff --git a/libm/fn.erfc.html b/libm/fn.erfc.html new file mode 100644 index 000000000..6c29cdc6e --- /dev/null +++ b/libm/fn.erfc.html @@ -0,0 +1,6 @@ +libm::erfc - Rust

Function libm::erfc

pub fn erfc(x: f64) -> f64

Complementary error function (f64)

Calculates the complementary probability, 1 - erf(x). It is computed directly, so that you can use it to avoid the loss of precision that would result from subtracting large probabilities (on large x) from 1.
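The precision point can be seen directly; a sketch (libm assumed as a dependency):

    fn main() {
        let x = 6.0_f64;
        let direct = libm::erfc(x);     // ~2.2e-17, computed without cancellation
        let naive = 1.0 - libm::erf(x); // 0.0: erf(6.0) already rounds to 1.0 in f64
        println!("erfc(6) = {direct:e}, 1 - erf(6) = {naive:e}");
    }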
\ No newline at end of file diff --git a/libm/fn.erfcf.html b/libm/fn.erfcf.html new file mode 100644 index 000000000..a8cb444c8 --- /dev/null +++ b/libm/fn.erfcf.html @@ -0,0 +1,6 @@ +libm::erfcf - Rust

Function libm::erfcf

pub fn erfcf(x: f32) -> f32

Complementary error function (f32)

Calculates the complementary probability, 1 - erf(x). It is computed directly, so that you can use it to avoid the loss of precision that would result from subtracting large probabilities (on large x) from 1.
\ No newline at end of file diff --git a/libm/fn.erff.html b/libm/fn.erff.html new file mode 100644 index 000000000..7b1fb7a94 --- /dev/null +++ b/libm/fn.erff.html @@ -0,0 +1,5 @@ +libm::erff - Rust

Function libm::erff

pub fn erff(x: f32) -> f32

Error function (f32)

Calculates an approximation to the “error function”, which estimates the probability that an observation will fall within x standard deviations of the mean (assuming a normal distribution).
\ No newline at end of file diff --git a/libm/fn.exp.html b/libm/fn.exp.html new file mode 100644 index 000000000..bf2200749 --- /dev/null +++ b/libm/fn.exp.html @@ -0,0 +1,4 @@ +libm::exp - Rust

Function libm::exp

pub fn exp(x: f64) -> f64

Exponential, base e (f64)

Calculate the exponential of x, that is, e raised to the power x (where e is the base of the natural system of logarithms, approximately 2.71828).
\ No newline at end of file diff --git a/libm/fn.exp10.html b/libm/fn.exp10.html new file mode 100644 index 000000000..6a6181ebd --- /dev/null +++ b/libm/fn.exp10.html @@ -0,0 +1 @@ +libm::exp10 - Rust

Function libm::exp10

pub fn exp10(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.exp10f.html b/libm/fn.exp10f.html new file mode 100644 index 000000000..daba3465e --- /dev/null +++ b/libm/fn.exp10f.html @@ -0,0 +1 @@ +libm::exp10f - Rust

Function libm::exp10f

pub fn exp10f(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.exp2.html b/libm/fn.exp2.html new file mode 100644 index 000000000..cb6f6dbb4 --- /dev/null +++ b/libm/fn.exp2.html @@ -0,0 +1,3 @@ +libm::exp2 - Rust

Function libm::exp2

pub fn exp2(x: f64) -> f64

Exponential, base 2 (f64)

Calculate 2^x, that is, 2 raised to the power x.
\ No newline at end of file diff --git a/libm/fn.exp2f.html b/libm/fn.exp2f.html new file mode 100644 index 000000000..35c43ed0d --- /dev/null +++ b/libm/fn.exp2f.html @@ -0,0 +1,3 @@ +libm::exp2f - Rust

Function libm::exp2f

pub fn exp2f(x: f32) -> f32

Exponential, base 2 (f32)

Calculate 2^x, that is, 2 raised to the power x.
\ No newline at end of file diff --git a/libm/fn.expf.html b/libm/fn.expf.html new file mode 100644 index 000000000..154079c80 --- /dev/null +++ b/libm/fn.expf.html @@ -0,0 +1,4 @@ +libm::expf - Rust

Function libm::expf

pub fn expf(x: f32) -> f32

Exponential, base e (f32)

Calculate the exponential of x, that is, e raised to the power x (where e is the base of the natural system of logarithms, approximately 2.71828).
\ No newline at end of file diff --git a/libm/fn.expm1.html b/libm/fn.expm1.html new file mode 100644 index 000000000..88720f798 --- /dev/null +++ b/libm/fn.expm1.html @@ -0,0 +1,7 @@ +libm::expm1 - Rust

Function libm::expm1

pub fn expm1(x: f64) -> f64

Exponential, base e, of x-1 (f64)

Calculates the exponential of x and subtracts 1, that is, e raised to the power x minus 1 (where e is the base of the natural system of logarithms, approximately 2.71828). The result is accurate even for small values of x, where using exp(x)-1 would lose many significant digits.
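A sketch of the small-x accuracy claim (libm assumed as a dependency):

    fn main() {
        let x = 1e-10_f64;
        let accurate = libm::expm1(x);   // ~1.00000000005e-10, correct to full precision
        let lossy = libm::exp(x) - 1.0;  // ~1.0000000827e-10: most digits are rounding noise
        println!("expm1: {accurate:e}, exp-1: {lossy:e}");
    }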
\ No newline at end of file diff --git a/libm/fn.expm1f.html b/libm/fn.expm1f.html new file mode 100644 index 000000000..15095b107 --- /dev/null +++ b/libm/fn.expm1f.html @@ -0,0 +1,7 @@ +libm::expm1f - Rust

Function libm::expm1f

pub fn expm1f(x: f32) -> f32

Exponential, base e, of x-1 (f32)

Calculates the exponential of x and subtracts 1, that is, e raised to the power x minus 1 (where e is the base of the natural system of logarithms, approximately 2.71828). The result is accurate even for small values of x, where using exp(x)-1 would lose many significant digits.
\ No newline at end of file diff --git a/libm/fn.fabs.html b/libm/fn.fabs.html new file mode 100644 index 000000000..27e3ae971 --- /dev/null +++ b/libm/fn.fabs.html @@ -0,0 +1,4 @@ +libm::fabs - Rust

Function libm::fabs

pub fn fabs(x: f64) -> f64

Absolute value (magnitude) (f64)

Calculates the absolute value (magnitude) of the argument x, by direct manipulation of the bit representation of x.
\ No newline at end of file diff --git a/libm/fn.fabsf.html b/libm/fn.fabsf.html new file mode 100644 index 000000000..bab1663e8 --- /dev/null +++ b/libm/fn.fabsf.html @@ -0,0 +1,4 @@ +libm::fabsf - Rust

Function libm::fabsf

pub fn fabsf(x: f32) -> f32

Absolute value (magnitude) (f32)

Calculates the absolute value (magnitude) of the argument x, by direct manipulation of the bit representation of x.
\ No newline at end of file diff --git a/libm/fn.fdim.html b/libm/fn.fdim.html new file mode 100644 index 000000000..48ad15fdc --- /dev/null +++ b/libm/fn.fdim.html @@ -0,0 +1,9 @@ +libm::fdim - Rust

Function libm::fdim

pub fn fdim(x: f64, y: f64) -> f64

Positive difference (f64)

Determines the positive difference between arguments, returning:

x - y, if x > y
positive zero (+0), if x <= y

A range error may occur.
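A sketch of the two cases (libm assumed as a dependency):

    fn main() {
        assert_eq!(libm::fdim(5.0, 3.0), 2.0); // x > y: returns x - y
        assert_eq!(libm::fdim(3.0, 5.0), 0.0); // x <= y: returns positive zero
    }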
\ No newline at end of file diff --git a/libm/fn.fdimf.html b/libm/fn.fdimf.html new file mode 100644 index 000000000..59ad90d33 --- /dev/null +++ b/libm/fn.fdimf.html @@ -0,0 +1,9 @@ +libm::fdimf - Rust

Function libm::fdimf

pub fn fdimf(x: f32, y: f32) -> f32

Positive difference (f32)

Determines the positive difference between arguments, returning:

x - y, if x > y
positive zero (+0), if x <= y

A range error may occur.
\ No newline at end of file diff --git a/libm/fn.floor.html b/libm/fn.floor.html new file mode 100644 index 000000000..2e3505d8e --- /dev/null +++ b/libm/fn.floor.html @@ -0,0 +1,3 @@ +libm::floor - Rust

Function libm::floor

pub fn floor(x: f64) -> f64

Floor (f64)

Finds the nearest integer less than or equal to x.
\ No newline at end of file diff --git a/libm/fn.floorf.html b/libm/fn.floorf.html new file mode 100644 index 000000000..47c110afb --- /dev/null +++ b/libm/fn.floorf.html @@ -0,0 +1,3 @@ +libm::floorf - Rust

Function libm::floorf

pub fn floorf(x: f32) -> f32

Floor (f32)

Finds the nearest integer less than or equal to x.
\ No newline at end of file diff --git a/libm/fn.fma.html b/libm/fn.fma.html new file mode 100644 index 000000000..5af9e368c --- /dev/null +++ b/libm/fn.fma.html @@ -0,0 +1,5 @@ +libm::fma - Rust

Function libm::fma

pub fn fma(x: f64, y: f64, z: f64) -> f64

Floating multiply add (f64)

Computes (x*y)+z, rounded as one ternary operation: it computes the value (as if) to infinite precision and rounds once to the result format, according to the rounding mode characterized by the value of FLT_ROUNDS.
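A sketch where the single rounding is visible (libm assumed as a dependency):

    fn main() {
        let x = 2f64.powi(27) + 1.0;            // 2^27 + 1
        let big = 2f64.powi(54) + 2f64.powi(28);
        let fused = libm::fma(x, x, -big);      // 1.0: the exact square 2^54 + 2^28 + 1 survives
        let split = x * x - big;                // 0.0: x*x rounds to 2^54 + 2^28 before subtracting
        assert_eq!(fused, 1.0);
        assert_eq!(split, 0.0);
    }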
\ No newline at end of file diff --git a/libm/fn.fmaf.html b/libm/fn.fmaf.html new file mode 100644 index 000000000..57c4ce149 --- /dev/null +++ b/libm/fn.fmaf.html @@ -0,0 +1,5 @@ +libm::fmaf - Rust

Function libm::fmaf

pub fn fmaf(x: f32, y: f32, z: f32) -> f32

Floating multiply add (f32)

Computes (x*y)+z, rounded as one ternary operation: it computes the value (as if) to infinite precision and rounds once to the result format, according to the rounding mode characterized by the value of FLT_ROUNDS.
\ No newline at end of file diff --git a/libm/fn.fmax.html b/libm/fn.fmax.html new file mode 100644 index 000000000..be7c2ce3c --- /dev/null +++ b/libm/fn.fmax.html @@ -0,0 +1 @@ +libm::fmax - Rust

Function libm::fmax

pub fn fmax(x: f64, y: f64) -> f64
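No description is attached to this item; as a sketch, the usual C99 fmax behavior, which this implementation is assumed to follow, treats a NaN operand as missing data:

    fn main() {
        assert_eq!(libm::fmax(1.0, 2.0), 2.0);
        // Assumed C99 semantics: a single NaN argument is ignored.
        assert_eq!(libm::fmax(1.0, f64::NAN), 1.0);
    }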
\ No newline at end of file diff --git a/libm/fn.fmaxf.html b/libm/fn.fmaxf.html new file mode 100644 index 000000000..3664f2cda --- /dev/null +++ b/libm/fn.fmaxf.html @@ -0,0 +1 @@ +libm::fmaxf - Rust

Function libm::fmaxf

pub fn fmaxf(x: f32, y: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.fmin.html b/libm/fn.fmin.html new file mode 100644 index 000000000..839e33446 --- /dev/null +++ b/libm/fn.fmin.html @@ -0,0 +1 @@ +libm::fmin - Rust

Function libm::fmin

pub fn fmin(x: f64, y: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.fminf.html b/libm/fn.fminf.html new file mode 100644 index 000000000..18272cc3f --- /dev/null +++ b/libm/fn.fminf.html @@ -0,0 +1 @@ +libm::fminf - Rust

Function libm::fminf

pub fn fminf(x: f32, y: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.fmod.html b/libm/fn.fmod.html new file mode 100644 index 000000000..c50850030 --- /dev/null +++ b/libm/fn.fmod.html @@ -0,0 +1 @@ +libm::fmod - Rust

Function libm::fmod

pub fn fmod(x: f64, y: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.fmodf.html b/libm/fn.fmodf.html new file mode 100644 index 000000000..a231826e5 --- /dev/null +++ b/libm/fn.fmodf.html @@ -0,0 +1 @@ +libm::fmodf - Rust

Function libm::fmodf

pub fn fmodf(x: f32, y: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.frexp.html b/libm/fn.frexp.html new file mode 100644 index 000000000..d04626c9f --- /dev/null +++ b/libm/fn.frexp.html @@ -0,0 +1 @@ +libm::frexp - Rust

Function libm::frexp

pub fn frexp(x: f64) -> (f64, i32)
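The tuple return replaces C's out-parameter; a sketch, assuming the tuple order is (fraction, exponent), mirroring C's return value and out-parameter:

    fn main() {
        // Decomposes x into fraction * 2^exp with the fraction in [0.5, 1).
        let (fraction, exp) = libm::frexp(8.0);
        assert_eq!((fraction, exp), (0.5, 4)); // 8.0 == 0.5 * 2^4
    }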
\ No newline at end of file diff --git a/libm/fn.frexpf.html b/libm/fn.frexpf.html new file mode 100644 index 000000000..bd8643c8f --- /dev/null +++ b/libm/fn.frexpf.html @@ -0,0 +1 @@ +libm::frexpf - Rust

Function libm::frexpf

pub fn frexpf(x: f32) -> (f32, i32)
\ No newline at end of file diff --git a/libm/fn.hypot.html b/libm/fn.hypot.html new file mode 100644 index 000000000..41b88e3a2 --- /dev/null +++ b/libm/fn.hypot.html @@ -0,0 +1 @@ +libm::hypot - Rust

Function libm::hypot

pub fn hypot(x: f64, y: f64) -> f64
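A sketch, including the overflow case that makes hypot preferable to the naive formula (libm assumed as a dependency):

    fn main() {
        assert_eq!(libm::hypot(3.0, 4.0), 5.0);
        // Naive sqrt(x*x + y*y) overflows to infinity here; hypot does not.
        let (x, y) = (3.0e200, 4.0e200);
        assert!(libm::sqrt(x * x + y * y).is_infinite());
        assert!(libm::hypot(x, y).is_finite());
    }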
\ No newline at end of file diff --git a/libm/fn.hypotf.html b/libm/fn.hypotf.html new file mode 100644 index 000000000..532783e76 --- /dev/null +++ b/libm/fn.hypotf.html @@ -0,0 +1 @@ +libm::hypotf - Rust

Function libm::hypotf

pub fn hypotf(x: f32, y: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.ilogb.html b/libm/fn.ilogb.html new file mode 100644 index 000000000..48b1cfeaa --- /dev/null +++ b/libm/fn.ilogb.html @@ -0,0 +1 @@ +libm::ilogb - Rust

Function libm::ilogb

pub fn ilogb(x: f64) -> i32
\ No newline at end of file diff --git a/libm/fn.ilogbf.html b/libm/fn.ilogbf.html new file mode 100644 index 000000000..506557d7d --- /dev/null +++ b/libm/fn.ilogbf.html @@ -0,0 +1 @@ +libm::ilogbf - Rust

Function libm::ilogbf

pub fn ilogbf(x: f32) -> i32
\ No newline at end of file diff --git a/libm/fn.j0.html b/libm/fn.j0.html new file mode 100644 index 000000000..2fa005784 --- /dev/null +++ b/libm/fn.j0.html @@ -0,0 +1 @@ +libm::j0 - Rust

Function libm::j0

pub fn j0(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.j0f.html b/libm/fn.j0f.html new file mode 100644 index 000000000..e2fc5a29c --- /dev/null +++ b/libm/fn.j0f.html @@ -0,0 +1 @@ +libm::j0f - Rust

Function libm::j0f

pub fn j0f(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.j1.html b/libm/fn.j1.html new file mode 100644 index 000000000..d4287fbc3 --- /dev/null +++ b/libm/fn.j1.html @@ -0,0 +1 @@ +libm::j1 - Rust

Function libm::j1

pub fn j1(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.j1f.html b/libm/fn.j1f.html new file mode 100644 index 000000000..214650979 --- /dev/null +++ b/libm/fn.j1f.html @@ -0,0 +1 @@ +libm::j1f - Rust

Function libm::j1f

pub fn j1f(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.jn.html b/libm/fn.jn.html new file mode 100644 index 000000000..ab78b1c1a --- /dev/null +++ b/libm/fn.jn.html @@ -0,0 +1 @@ +libm::jn - Rust

Function libm::jn

pub fn jn(n: i32, x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.jnf.html b/libm/fn.jnf.html new file mode 100644 index 000000000..d938702a3 --- /dev/null +++ b/libm/fn.jnf.html @@ -0,0 +1 @@ +libm::jnf - Rust

Function libm::jnf

pub fn jnf(n: i32, x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.ldexp.html b/libm/fn.ldexp.html new file mode 100644 index 000000000..205edcf97 --- /dev/null +++ b/libm/fn.ldexp.html @@ -0,0 +1 @@ +libm::ldexp - Rust

Function libm::ldexp

pub fn ldexp(x: f64, n: i32) -> f64
\ No newline at end of file diff --git a/libm/fn.ldexpf.html b/libm/fn.ldexpf.html new file mode 100644 index 000000000..84cfb8192 --- /dev/null +++ b/libm/fn.ldexpf.html @@ -0,0 +1 @@ +libm::ldexpf - Rust

Function libm::ldexpf

pub fn ldexpf(x: f32, n: i32) -> f32
\ No newline at end of file diff --git a/libm/fn.lgamma.html b/libm/fn.lgamma.html new file mode 100644 index 000000000..95dd8400b --- /dev/null +++ b/libm/fn.lgamma.html @@ -0,0 +1 @@ +libm::lgamma - Rust

Function libm::lgamma

pub fn lgamma(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.lgamma_r.html b/libm/fn.lgamma_r.html new file mode 100644 index 000000000..d40edff90 --- /dev/null +++ b/libm/fn.lgamma_r.html @@ -0,0 +1 @@ +libm::lgamma_r - Rust

Function libm::lgamma_r

pub fn lgamma_r(x: f64) -> (f64, i32)
\ No newline at end of file diff --git a/libm/fn.lgammaf.html b/libm/fn.lgammaf.html new file mode 100644 index 000000000..6804e54aa --- /dev/null +++ b/libm/fn.lgammaf.html @@ -0,0 +1 @@ +libm::lgammaf - Rust

Function libm::lgammaf

pub fn lgammaf(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.lgammaf_r.html b/libm/fn.lgammaf_r.html new file mode 100644 index 000000000..3281ec75d --- /dev/null +++ b/libm/fn.lgammaf_r.html @@ -0,0 +1 @@ +libm::lgammaf_r - Rust

Function libm::lgammaf_r

pub fn lgammaf_r(x: f32) -> (f32, i32)
\ No newline at end of file diff --git a/libm/fn.log.html b/libm/fn.log.html new file mode 100644 index 000000000..9d2e9c89d --- /dev/null +++ b/libm/fn.log.html @@ -0,0 +1 @@ +libm::log - Rust

Function libm::log

pub fn log(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.log10.html b/libm/fn.log10.html new file mode 100644 index 000000000..b43d8bc4e --- /dev/null +++ b/libm/fn.log10.html @@ -0,0 +1 @@ +libm::log10 - Rust

Function libm::log10

pub fn log10(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.log10f.html b/libm/fn.log10f.html new file mode 100644 index 000000000..9e802a13d --- /dev/null +++ b/libm/fn.log10f.html @@ -0,0 +1 @@ +libm::log10f - Rust

[][src]Function libm::log10f

pub fn log10f(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.log1p.html b/libm/fn.log1p.html new file mode 100644 index 000000000..30cd9b877 --- /dev/null +++ b/libm/fn.log1p.html @@ -0,0 +1 @@ +libm::log1p - Rust

[][src]Function libm::log1p

pub fn log1p(x: f64) -> f64
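Computes ln(1 + x) without the cancellation that forming 1.0 + x first would cause for tiny x. A minimal sketch:

use libm::log1p;

// 1e-17 is below f64 epsilon, so 1.0 + 1e-17 rounds to exactly 1.0...
assert_eq!((1.0f64 + 1e-17).ln(), 0.0);
// ...while log1p keeps the small contribution (ln(1 + x) is about x here).
assert!(log1p(1e-17) > 0.0);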
\ No newline at end of file diff --git a/libm/fn.log1pf.html b/libm/fn.log1pf.html new file mode 100644 index 000000000..b0c0b2b3d --- /dev/null +++ b/libm/fn.log1pf.html @@ -0,0 +1 @@ +libm::log1pf - Rust

[][src]Function libm::log1pf

pub fn log1pf(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.log2.html b/libm/fn.log2.html new file mode 100644 index 000000000..472ccc7c9 --- /dev/null +++ b/libm/fn.log2.html @@ -0,0 +1 @@ +libm::log2 - Rust

[][src]Function libm::log2

pub fn log2(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.log2f.html b/libm/fn.log2f.html new file mode 100644 index 000000000..d19f200bf --- /dev/null +++ b/libm/fn.log2f.html @@ -0,0 +1 @@ +libm::log2f - Rust

[][src]Function libm::log2f

pub fn log2f(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.logf.html b/libm/fn.logf.html new file mode 100644 index 000000000..923357579 --- /dev/null +++ b/libm/fn.logf.html @@ -0,0 +1 @@ +libm::logf - Rust

[][src]Function libm::logf

pub fn logf(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.modf.html b/libm/fn.modf.html new file mode 100644 index 000000000..81d237827 --- /dev/null +++ b/libm/fn.modf.html @@ -0,0 +1 @@ +libm::modf - Rust

[][src]Function libm::modf

pub fn modf(x: f64) -> (f64, f64)
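Splits x into fractional and integral parts that carry the sign of x and sum back to it exactly. A minimal sketch, assuming the C convention of returning the fractional part first and the integral part second:

use libm::modf;

let (fract, int) = modf(3.25);
assert_eq!(int, 3.0);
// The two parts reconstruct the argument exactly.
assert_eq!(fract + int, 3.25);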
\ No newline at end of file diff --git a/libm/fn.modff.html b/libm/fn.modff.html new file mode 100644 index 000000000..9273c5ca2 --- /dev/null +++ b/libm/fn.modff.html @@ -0,0 +1 @@ +libm::modff - Rust

[][src]Function libm::modff

pub fn modff(x: f32) -> (f32, f32)
\ No newline at end of file diff --git a/libm/fn.pow.html b/libm/fn.pow.html new file mode 100644 index 000000000..460a75e1d --- /dev/null +++ b/libm/fn.pow.html @@ -0,0 +1 @@ +libm::pow - Rust

[][src]Function libm::pow

pub fn pow(x: f64, y: f64) -> f64
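The general power function x^y, matching C's pow. A minimal sketch:

use libm::pow;

// 2^10 = 1024 (allowing a little rounding slack).
assert!((pow(2.0, 10.0) - 1024.0).abs() < 1e-9);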
\ No newline at end of file diff --git a/libm/fn.powf.html b/libm/fn.powf.html new file mode 100644 index 000000000..194769b4f --- /dev/null +++ b/libm/fn.powf.html @@ -0,0 +1 @@ +libm::powf - Rust

[][src]Function libm::powf

pub fn powf(x: f32, y: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.remquo.html b/libm/fn.remquo.html new file mode 100644 index 000000000..2eef182ac --- /dev/null +++ b/libm/fn.remquo.html @@ -0,0 +1 @@ +libm::remquo - Rust

[][src]Function libm::remquo

pub fn remquo(x: f64, y: f64) -> (f64, i32)
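The IEEE remainder of x/y together with the low bits of the rounded quotient, as in C's remquo. A minimal sketch, assuming those semantics:

use libm::remquo;

// 7.0 / 3.0 rounds to 2, so the remainder is 7.0 - 2.0 * 3.0 = 1.0
// and the reported quotient bits are 2.
let (rem, quo) = remquo(7.0, 3.0);
assert_eq!(rem, 1.0);
assert_eq!(quo, 2);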
\ No newline at end of file diff --git a/libm/fn.remquof.html b/libm/fn.remquof.html new file mode 100644 index 000000000..86a7b0d20 --- /dev/null +++ b/libm/fn.remquof.html @@ -0,0 +1 @@ +libm::remquof - Rust

[][src]Function libm::remquof

pub fn remquof(x: f32, y: f32) -> (f32, i32)
\ No newline at end of file diff --git a/libm/fn.round.html b/libm/fn.round.html new file mode 100644 index 000000000..5660dd62a --- /dev/null +++ b/libm/fn.round.html @@ -0,0 +1 @@ +libm::round - Rust

[][src]Function libm::round

pub fn round(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.roundf.html b/libm/fn.roundf.html new file mode 100644 index 000000000..c95ac9f50 --- /dev/null +++ b/libm/fn.roundf.html @@ -0,0 +1 @@ +libm::roundf - Rust

[][src]Function libm::roundf

pub fn roundf(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.scalbn.html b/libm/fn.scalbn.html new file mode 100644 index 000000000..b60905905 --- /dev/null +++ b/libm/fn.scalbn.html @@ -0,0 +1 @@ +libm::scalbn - Rust

[][src]Function libm::scalbn

pub fn scalbn(x: f64, n: i32) -> f64
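Computes x * 2^n by exponent manipulation; for binary floating point it behaves like ldexp. A minimal sketch:

use libm::{ldexp, scalbn};

// Scaling by 2^-1 halves the value exactly.
assert_eq!(scalbn(3.0, -1), 1.5);
assert_eq!(scalbn(3.0, -1), ldexp(3.0, -1));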
\ No newline at end of file diff --git a/libm/fn.scalbnf.html b/libm/fn.scalbnf.html new file mode 100644 index 000000000..35d4dcde9 --- /dev/null +++ b/libm/fn.scalbnf.html @@ -0,0 +1 @@ +libm::scalbnf - Rust

[][src]Function libm::scalbnf

pub fn scalbnf(x: f32, n: i32) -> f32
\ No newline at end of file diff --git a/libm/fn.sin.html b/libm/fn.sin.html new file mode 100644 index 000000000..0c11ad109 --- /dev/null +++ b/libm/fn.sin.html @@ -0,0 +1 @@ +libm::sin - Rust

[][src]Function libm::sin

pub fn sin(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.sincos.html b/libm/fn.sincos.html new file mode 100644 index 000000000..05f84ff7f --- /dev/null +++ b/libm/fn.sincos.html @@ -0,0 +1 @@ +libm::sincos - Rust

[][src]Function libm::sincos

pub fn sincos(x: f64) -> (f64, f64)
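Computes the sine and cosine of x in a single call, sharing the argument reduction between them. A minimal sketch, assuming the tuple order (sin x, cos x):

use libm::sincos;

let (s, c) = sincos(0.0);
assert_eq!(s, 0.0);
assert_eq!(c, 1.0);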
\ No newline at end of file diff --git a/libm/fn.sincosf.html b/libm/fn.sincosf.html new file mode 100644 index 000000000..3930d5731 --- /dev/null +++ b/libm/fn.sincosf.html @@ -0,0 +1 @@ +libm::sincosf - Rust

[][src]Function libm::sincosf

pub fn sincosf(x: f32) -> (f32, f32)
\ No newline at end of file diff --git a/libm/fn.sinf.html b/libm/fn.sinf.html new file mode 100644 index 000000000..1924178eb --- /dev/null +++ b/libm/fn.sinf.html @@ -0,0 +1 @@ +libm::sinf - Rust

[][src]Function libm::sinf

pub fn sinf(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.sinh.html b/libm/fn.sinh.html new file mode 100644 index 000000000..b028788fa --- /dev/null +++ b/libm/fn.sinh.html @@ -0,0 +1 @@ +libm::sinh - Rust

[][src]Function libm::sinh

pub fn sinh(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.sinhf.html b/libm/fn.sinhf.html new file mode 100644 index 000000000..1d9299502 --- /dev/null +++ b/libm/fn.sinhf.html @@ -0,0 +1 @@ +libm::sinhf - Rust

[][src]Function libm::sinhf

pub fn sinhf(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.sqrt.html b/libm/fn.sqrt.html new file mode 100644 index 000000000..068bcbae5 --- /dev/null +++ b/libm/fn.sqrt.html @@ -0,0 +1 @@ +libm::sqrt - Rust

[][src]Function libm::sqrt

pub fn sqrt(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.sqrtf.html b/libm/fn.sqrtf.html new file mode 100644 index 000000000..293f4d47f --- /dev/null +++ b/libm/fn.sqrtf.html @@ -0,0 +1 @@ +libm::sqrtf - Rust

[][src]Function libm::sqrtf

pub fn sqrtf(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.tan.html b/libm/fn.tan.html new file mode 100644 index 000000000..6ba99533f --- /dev/null +++ b/libm/fn.tan.html @@ -0,0 +1 @@ +libm::tan - Rust

[][src]Function libm::tan

pub fn tan(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.tanf.html b/libm/fn.tanf.html new file mode 100644 index 000000000..1880a598d --- /dev/null +++ b/libm/fn.tanf.html @@ -0,0 +1 @@ +libm::tanf - Rust

[][src]Function libm::tanf

pub fn tanf(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.tanh.html b/libm/fn.tanh.html new file mode 100644 index 000000000..ce763ed99 --- /dev/null +++ b/libm/fn.tanh.html @@ -0,0 +1 @@ +libm::tanh - Rust

[][src]Function libm::tanh

pub fn tanh(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.tanhf.html b/libm/fn.tanhf.html new file mode 100644 index 000000000..4a1106c12 --- /dev/null +++ b/libm/fn.tanhf.html @@ -0,0 +1 @@ +libm::tanhf - Rust

[][src]Function libm::tanhf

pub fn tanhf(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.tgamma.html b/libm/fn.tgamma.html new file mode 100644 index 000000000..7573bfaaa --- /dev/null +++ b/libm/fn.tgamma.html @@ -0,0 +1 @@ +libm::tgamma - Rust

[][src]Function libm::tgamma

pub fn tgamma(x: f64) -> f64
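The true gamma function Γ(x); for positive integers n, Γ(n) = (n - 1)!. A minimal sketch:

use libm::tgamma;

// Γ(5) = 4! = 24 (allowing a little rounding slack).
assert!((tgamma(5.0) - 24.0).abs() < 1e-12);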
\ No newline at end of file diff --git a/libm/fn.tgammaf.html b/libm/fn.tgammaf.html new file mode 100644 index 000000000..2c4990935 --- /dev/null +++ b/libm/fn.tgammaf.html @@ -0,0 +1 @@ +libm::tgammaf - Rust

[][src]Function libm::tgammaf

pub fn tgammaf(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.trunc.html b/libm/fn.trunc.html new file mode 100644 index 000000000..5cd9f03ff --- /dev/null +++ b/libm/fn.trunc.html @@ -0,0 +1 @@ +libm::trunc - Rust

[][src]Function libm::trunc

pub fn trunc(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.truncf.html b/libm/fn.truncf.html new file mode 100644 index 000000000..94d1d6694 --- /dev/null +++ b/libm/fn.truncf.html @@ -0,0 +1 @@ +libm::truncf - Rust

[][src]Function libm::truncf

pub fn truncf(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.y0.html b/libm/fn.y0.html new file mode 100644 index 000000000..521993d62 --- /dev/null +++ b/libm/fn.y0.html @@ -0,0 +1 @@ +libm::y0 - Rust

[][src]Function libm::y0

pub fn y0(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.y0f.html b/libm/fn.y0f.html new file mode 100644 index 000000000..0be5fd747 --- /dev/null +++ b/libm/fn.y0f.html @@ -0,0 +1 @@ +libm::y0f - Rust

[][src]Function libm::y0f

pub fn y0f(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.y1.html b/libm/fn.y1.html new file mode 100644 index 000000000..2c4aa4589 --- /dev/null +++ b/libm/fn.y1.html @@ -0,0 +1 @@ +libm::y1 - Rust

[][src]Function libm::y1

pub fn y1(x: f64) -> f64
\ No newline at end of file diff --git a/libm/fn.y1f.html b/libm/fn.y1f.html new file mode 100644 index 000000000..40b4710fe --- /dev/null +++ b/libm/fn.y1f.html @@ -0,0 +1 @@ +libm::y1f - Rust

[][src]Function libm::y1f

pub fn y1f(x: f32) -> f32
\ No newline at end of file diff --git a/libm/fn.yn.html b/libm/fn.yn.html new file mode 100644 index 000000000..08d775cc7 --- /dev/null +++ b/libm/fn.yn.html @@ -0,0 +1 @@ +libm::yn - Rust

[][src]Function libm::yn

pub fn yn(n: i32, x: f64) -> f64
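Bessel function of the second kind of order n. A minimal sketch (assuming, as with jn, that order 0 coincides with y0):

use libm::{y0, yn};

// Order 0 should agree with the dedicated order-0 routine.
assert!((yn(0, 1.5) - y0(1.5)).abs() < 1e-12);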
\ No newline at end of file diff --git a/libm/fn.ynf.html b/libm/fn.ynf.html new file mode 100644 index 000000000..cc879c01d --- /dev/null +++ b/libm/fn.ynf.html @@ -0,0 +1 @@ +libm::ynf - Rust

[][src]Function libm::ynf

pub fn ynf(n: i32, x: f32) -> f32
\ No newline at end of file diff --git a/libm/index.html b/libm/index.html new file mode 100644 index 000000000..663a20c1d --- /dev/null +++ b/libm/index.html @@ -0,0 +1,61 @@ +libm - Rust

[][src]Crate libm

libm in pure Rust

+

Usage

+

You can use this crate in two ways:

+ By calling the free functions exported at the crate root, e.g. libm::sqrt(x).
+ Through the F32Ext and F64Ext extension traits, which make the same operations available as methods on f32 and f64. A sketch of both styles follows.
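A minimal sketch of both styles (both routes reach the same implementations):

use libm::{powf, F32Ext};

// Free function...
let a = powf(2.0, 3.0);
// ...or the extension-trait method, named explicitly here.
let b = F32Ext::powf(2.0f32, 3.0);
assert_eq!(a, b);
assert_eq!(b, 8.0);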

Traits

+
F32Ext

Math support for f32

+
F64Ext

Math support for f64

+

Functions

+
acos

Arccosine (f64)

+
acosf

Arccosine (f32)

+
acosh

Inverse hyperbolic cosine (f64)

+
acoshf

Inverse hyperbolic cosine (f32)

+
asin

Arcsine (f64)

+
asinf

Arcsine (f32)

+
asinh

Inverse hyperbolic sine (f64)

+
asinhf

Inverse hyperbolic sine (f32)

+
atan

Arctangent (f64)

+
atan2

Arctangent of y/x (f64)

+
atan2f

Arctangent of y/x (f32)

+
atanf

Arctangent (f32)

+
atanh

Inverse hyperbolic tangent (f64)

+
atanhf

Inverse hyperbolic tangent (f32)

+
cbrt

Computes the cube root of the argument.

+
cbrtf

Cube root (f32)

+
ceil

Ceil (f64)

+
ceilf

Ceil (f32)

+
copysign

Sign of Y, magnitude of X (f64)

+
copysignf

Sign of Y, magnitude of X (f32)

+
cos
cosf
cosh

Hyperbolic cosine (f64)

+
coshf

Hyperbolic cosine (f32)

+
erf

Error function (f64)

+
erfc

Complementary error function (f64)

+
erfcf

Complementary error function (f32)

+
erff

Error function (f32)

+
exp

Exponential, base e (f64)

+
exp2

Exponential, base 2 (f64)

+
exp2f

Exponential, base 2 (f32)

+
exp10
exp10f
expf

Exponential, base e (f32)

+
expm1

Exponential, base e, of x-1 (f64)

+
expm1f

Exponential, base e, of x-1 (f32)

+
fabs

Absolute value (magnitude) (f64) +Calculates the absolute value (magnitude) of the argument x, +by direct manipulation of the bit representation of x.

+
fabsf

Absolute value (magnitude) (f32) +Calculates the absolute value (magnitude) of the argument x, +by direct manipulation of the bit representation of x.

+
fdim

Positive difference (f64)

+
fdimf

Positive difference (f32)

+
floor

Floor (f64)

+
floorf

Floor (f32)

+
fma

Floating multiply add (f64)

+
fmaf

Floating multiply add (f32)

+
fmax
fmaxf
fmin
fminf
fmod
fmodf
frexp
frexpf
hypot
hypotf
ilogb
ilogbf
j0
j0f
j1
j1f
jn
jnf
ldexp
ldexpf
lgamma
lgamma_r
lgammaf
lgammaf_r
log
log1p
log1pf
log2
log2f
log10
log10f
logf
modf
modff
pow
powf
remquo
remquof
round
roundf
scalbn
scalbnf
sin
sincos
sincosf
sinf
sinh
sinhf
sqrt
sqrtf
tan
tanf
tanh
tanhf
tgamma
tgammaf
trunc
truncf
y0
y0f
y1
y1f
yn
ynf
\ No newline at end of file diff --git a/libm/math/acos/fn.acos.html b/libm/math/acos/fn.acos.html new file mode 100644 index 000000000..5d1fa2a30 --- /dev/null +++ b/libm/math/acos/fn.acos.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.acos.html...

+ + + \ No newline at end of file diff --git a/libm/math/acosf/fn.acosf.html b/libm/math/acosf/fn.acosf.html new file mode 100644 index 000000000..f95217eda --- /dev/null +++ b/libm/math/acosf/fn.acosf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.acosf.html...

+ + + \ No newline at end of file diff --git a/libm/math/acosh/fn.acosh.html b/libm/math/acosh/fn.acosh.html new file mode 100644 index 000000000..f5ca3fb68 --- /dev/null +++ b/libm/math/acosh/fn.acosh.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.acosh.html...

+ + + \ No newline at end of file diff --git a/libm/math/acoshf/fn.acoshf.html b/libm/math/acoshf/fn.acoshf.html new file mode 100644 index 000000000..d1e024046 --- /dev/null +++ b/libm/math/acoshf/fn.acoshf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.acoshf.html...

+ + + \ No newline at end of file diff --git a/libm/math/asin/fn.asin.html b/libm/math/asin/fn.asin.html new file mode 100644 index 000000000..c21e66032 --- /dev/null +++ b/libm/math/asin/fn.asin.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.asin.html...

+ + + \ No newline at end of file diff --git a/libm/math/asinf/fn.asinf.html b/libm/math/asinf/fn.asinf.html new file mode 100644 index 000000000..3b5fc2a04 --- /dev/null +++ b/libm/math/asinf/fn.asinf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.asinf.html...

+ + + \ No newline at end of file diff --git a/libm/math/asinh/fn.asinh.html b/libm/math/asinh/fn.asinh.html new file mode 100644 index 000000000..c772f95da --- /dev/null +++ b/libm/math/asinh/fn.asinh.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.asinh.html...

+ + + \ No newline at end of file diff --git a/libm/math/asinhf/fn.asinhf.html b/libm/math/asinhf/fn.asinhf.html new file mode 100644 index 000000000..6de67246e --- /dev/null +++ b/libm/math/asinhf/fn.asinhf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.asinhf.html...

+ + + \ No newline at end of file diff --git a/libm/math/atan/fn.atan.html b/libm/math/atan/fn.atan.html new file mode 100644 index 000000000..1ea7a27d1 --- /dev/null +++ b/libm/math/atan/fn.atan.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.atan.html...

+ + + \ No newline at end of file diff --git a/libm/math/atan2/fn.atan2.html b/libm/math/atan2/fn.atan2.html new file mode 100644 index 000000000..574c41e6e --- /dev/null +++ b/libm/math/atan2/fn.atan2.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.atan2.html...

+ + + \ No newline at end of file diff --git a/libm/math/atan2f/fn.atan2f.html b/libm/math/atan2f/fn.atan2f.html new file mode 100644 index 000000000..332cb4865 --- /dev/null +++ b/libm/math/atan2f/fn.atan2f.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.atan2f.html...

+ + + \ No newline at end of file diff --git a/libm/math/atanf/fn.atanf.html b/libm/math/atanf/fn.atanf.html new file mode 100644 index 000000000..4133a0df3 --- /dev/null +++ b/libm/math/atanf/fn.atanf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.atanf.html...

+ + + \ No newline at end of file diff --git a/libm/math/atanh/fn.atanh.html b/libm/math/atanh/fn.atanh.html new file mode 100644 index 000000000..133556c75 --- /dev/null +++ b/libm/math/atanh/fn.atanh.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.atanh.html...

+ + + \ No newline at end of file diff --git a/libm/math/atanhf/fn.atanhf.html b/libm/math/atanhf/fn.atanhf.html new file mode 100644 index 000000000..69fa98341 --- /dev/null +++ b/libm/math/atanhf/fn.atanhf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.atanhf.html...

+ + + \ No newline at end of file diff --git a/libm/math/cbrt/fn.cbrt.html b/libm/math/cbrt/fn.cbrt.html new file mode 100644 index 000000000..7de81dd0a --- /dev/null +++ b/libm/math/cbrt/fn.cbrt.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.cbrt.html...

+ + + \ No newline at end of file diff --git a/libm/math/cbrtf/fn.cbrtf.html b/libm/math/cbrtf/fn.cbrtf.html new file mode 100644 index 000000000..9d20563a3 --- /dev/null +++ b/libm/math/cbrtf/fn.cbrtf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.cbrtf.html...

+ + + \ No newline at end of file diff --git a/libm/math/ceil/fn.ceil.html b/libm/math/ceil/fn.ceil.html new file mode 100644 index 000000000..e0a43c081 --- /dev/null +++ b/libm/math/ceil/fn.ceil.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.ceil.html...

+ + + \ No newline at end of file diff --git a/libm/math/ceilf/fn.ceilf.html b/libm/math/ceilf/fn.ceilf.html new file mode 100644 index 000000000..aa710880f --- /dev/null +++ b/libm/math/ceilf/fn.ceilf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.ceilf.html...

+ + + \ No newline at end of file diff --git a/libm/math/copysign/fn.copysign.html b/libm/math/copysign/fn.copysign.html new file mode 100644 index 000000000..bf309b544 --- /dev/null +++ b/libm/math/copysign/fn.copysign.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.copysign.html...

+ + + \ No newline at end of file diff --git a/libm/math/copysignf/fn.copysignf.html b/libm/math/copysignf/fn.copysignf.html new file mode 100644 index 000000000..406fd5610 --- /dev/null +++ b/libm/math/copysignf/fn.copysignf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.copysignf.html...

+ + + \ No newline at end of file diff --git a/libm/math/cos/fn.cos.html b/libm/math/cos/fn.cos.html new file mode 100644 index 000000000..dfbbc8218 --- /dev/null +++ b/libm/math/cos/fn.cos.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.cos.html...

+ + + \ No newline at end of file diff --git a/libm/math/cosf/fn.cosf.html b/libm/math/cosf/fn.cosf.html new file mode 100644 index 000000000..54a23aae0 --- /dev/null +++ b/libm/math/cosf/fn.cosf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.cosf.html...

+ + + \ No newline at end of file diff --git a/libm/math/cosh/fn.cosh.html b/libm/math/cosh/fn.cosh.html new file mode 100644 index 000000000..4c4df4fe9 --- /dev/null +++ b/libm/math/cosh/fn.cosh.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.cosh.html...

+ + + \ No newline at end of file diff --git a/libm/math/coshf/fn.coshf.html b/libm/math/coshf/fn.coshf.html new file mode 100644 index 000000000..09315971e --- /dev/null +++ b/libm/math/coshf/fn.coshf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.coshf.html...

+ + + \ No newline at end of file diff --git a/libm/math/erf/fn.erf.html b/libm/math/erf/fn.erf.html new file mode 100644 index 000000000..747369392 --- /dev/null +++ b/libm/math/erf/fn.erf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.erf.html...

+ + + \ No newline at end of file diff --git a/libm/math/erf/fn.erfc.html b/libm/math/erf/fn.erfc.html new file mode 100644 index 000000000..e1de18e0b --- /dev/null +++ b/libm/math/erf/fn.erfc.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.erfc.html...

+ + + \ No newline at end of file diff --git a/libm/math/erff/fn.erfcf.html b/libm/math/erff/fn.erfcf.html new file mode 100644 index 000000000..2274bfc1f --- /dev/null +++ b/libm/math/erff/fn.erfcf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.erfcf.html...

+ + + \ No newline at end of file diff --git a/libm/math/erff/fn.erff.html b/libm/math/erff/fn.erff.html new file mode 100644 index 000000000..052b8fb37 --- /dev/null +++ b/libm/math/erff/fn.erff.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.erff.html...

+ + + \ No newline at end of file diff --git a/libm/math/exp/fn.exp.html b/libm/math/exp/fn.exp.html new file mode 100644 index 000000000..11d29dc9a --- /dev/null +++ b/libm/math/exp/fn.exp.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.exp.html...

+ + + \ No newline at end of file diff --git a/libm/math/exp10/fn.exp10.html b/libm/math/exp10/fn.exp10.html new file mode 100644 index 000000000..27dfd5f1e --- /dev/null +++ b/libm/math/exp10/fn.exp10.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.exp10.html...

+ + + \ No newline at end of file diff --git a/libm/math/exp10f/fn.exp10f.html b/libm/math/exp10f/fn.exp10f.html new file mode 100644 index 000000000..b510af752 --- /dev/null +++ b/libm/math/exp10f/fn.exp10f.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.exp10f.html...

+ + + \ No newline at end of file diff --git a/libm/math/exp2/fn.exp2.html b/libm/math/exp2/fn.exp2.html new file mode 100644 index 000000000..6752da03f --- /dev/null +++ b/libm/math/exp2/fn.exp2.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.exp2.html...

+ + + \ No newline at end of file diff --git a/libm/math/exp2f/fn.exp2f.html b/libm/math/exp2f/fn.exp2f.html new file mode 100644 index 000000000..65b1a0513 --- /dev/null +++ b/libm/math/exp2f/fn.exp2f.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.exp2f.html...

+ + + \ No newline at end of file diff --git a/libm/math/expf/fn.expf.html b/libm/math/expf/fn.expf.html new file mode 100644 index 000000000..5f8422db5 --- /dev/null +++ b/libm/math/expf/fn.expf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.expf.html...

+ + + \ No newline at end of file diff --git a/libm/math/expm1/fn.expm1.html b/libm/math/expm1/fn.expm1.html new file mode 100644 index 000000000..fffce7d16 --- /dev/null +++ b/libm/math/expm1/fn.expm1.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.expm1.html...

+ + + \ No newline at end of file diff --git a/libm/math/expm1f/fn.expm1f.html b/libm/math/expm1f/fn.expm1f.html new file mode 100644 index 000000000..a0d12bdee --- /dev/null +++ b/libm/math/expm1f/fn.expm1f.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.expm1f.html...

+ + + \ No newline at end of file diff --git a/libm/math/fabs/fn.fabs.html b/libm/math/fabs/fn.fabs.html new file mode 100644 index 000000000..b4a46205a --- /dev/null +++ b/libm/math/fabs/fn.fabs.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.fabs.html...

+ + + \ No newline at end of file diff --git a/libm/math/fabsf/fn.fabsf.html b/libm/math/fabsf/fn.fabsf.html new file mode 100644 index 000000000..37888e3d4 --- /dev/null +++ b/libm/math/fabsf/fn.fabsf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.fabsf.html...

+ + + \ No newline at end of file diff --git a/libm/math/fdim/fn.fdim.html b/libm/math/fdim/fn.fdim.html new file mode 100644 index 000000000..c1ee595c1 --- /dev/null +++ b/libm/math/fdim/fn.fdim.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.fdim.html...

+ + + \ No newline at end of file diff --git a/libm/math/fdimf/fn.fdimf.html b/libm/math/fdimf/fn.fdimf.html new file mode 100644 index 000000000..61127d824 --- /dev/null +++ b/libm/math/fdimf/fn.fdimf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.fdimf.html...

+ + + \ No newline at end of file diff --git a/libm/math/floor/fn.floor.html b/libm/math/floor/fn.floor.html new file mode 100644 index 000000000..a89e7c506 --- /dev/null +++ b/libm/math/floor/fn.floor.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.floor.html...

+ + + \ No newline at end of file diff --git a/libm/math/floorf/fn.floorf.html b/libm/math/floorf/fn.floorf.html new file mode 100644 index 000000000..b42efbfd5 --- /dev/null +++ b/libm/math/floorf/fn.floorf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.floorf.html...

+ + + \ No newline at end of file diff --git a/libm/math/fma/fn.fma.html b/libm/math/fma/fn.fma.html new file mode 100644 index 000000000..48edfb111 --- /dev/null +++ b/libm/math/fma/fn.fma.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.fma.html...

+ + + \ No newline at end of file diff --git a/libm/math/fmaf/fn.fmaf.html b/libm/math/fmaf/fn.fmaf.html new file mode 100644 index 000000000..dfbebf001 --- /dev/null +++ b/libm/math/fmaf/fn.fmaf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.fmaf.html...

+ + + \ No newline at end of file diff --git a/libm/math/fmax/fn.fmax.html b/libm/math/fmax/fn.fmax.html new file mode 100644 index 000000000..44ebcffc9 --- /dev/null +++ b/libm/math/fmax/fn.fmax.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.fmax.html...

+ + + \ No newline at end of file diff --git a/libm/math/fmaxf/fn.fmaxf.html b/libm/math/fmaxf/fn.fmaxf.html new file mode 100644 index 000000000..e16ecaf92 --- /dev/null +++ b/libm/math/fmaxf/fn.fmaxf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.fmaxf.html...

+ + + \ No newline at end of file diff --git a/libm/math/fmin/fn.fmin.html b/libm/math/fmin/fn.fmin.html new file mode 100644 index 000000000..a152018ca --- /dev/null +++ b/libm/math/fmin/fn.fmin.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.fmin.html...

+ + + \ No newline at end of file diff --git a/libm/math/fminf/fn.fminf.html b/libm/math/fminf/fn.fminf.html new file mode 100644 index 000000000..8f4bc2a57 --- /dev/null +++ b/libm/math/fminf/fn.fminf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.fminf.html...

+ + + \ No newline at end of file diff --git a/libm/math/fmod/fn.fmod.html b/libm/math/fmod/fn.fmod.html new file mode 100644 index 000000000..e32e7a307 --- /dev/null +++ b/libm/math/fmod/fn.fmod.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.fmod.html...

+ + + \ No newline at end of file diff --git a/libm/math/fmodf/fn.fmodf.html b/libm/math/fmodf/fn.fmodf.html new file mode 100644 index 000000000..c45889792 --- /dev/null +++ b/libm/math/fmodf/fn.fmodf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.fmodf.html...

+ + + \ No newline at end of file diff --git a/libm/math/frexp/fn.frexp.html b/libm/math/frexp/fn.frexp.html new file mode 100644 index 000000000..81937bac4 --- /dev/null +++ b/libm/math/frexp/fn.frexp.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.frexp.html...

+ + + \ No newline at end of file diff --git a/libm/math/frexpf/fn.frexpf.html b/libm/math/frexpf/fn.frexpf.html new file mode 100644 index 000000000..18c0575fa --- /dev/null +++ b/libm/math/frexpf/fn.frexpf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.frexpf.html...

+ + + \ No newline at end of file diff --git a/libm/math/hypot/fn.hypot.html b/libm/math/hypot/fn.hypot.html new file mode 100644 index 000000000..71528e63d --- /dev/null +++ b/libm/math/hypot/fn.hypot.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.hypot.html...

+ + + \ No newline at end of file diff --git a/libm/math/hypotf/fn.hypotf.html b/libm/math/hypotf/fn.hypotf.html new file mode 100644 index 000000000..ac90361d0 --- /dev/null +++ b/libm/math/hypotf/fn.hypotf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.hypotf.html...

+ + + \ No newline at end of file diff --git a/libm/math/ilogb/fn.ilogb.html b/libm/math/ilogb/fn.ilogb.html new file mode 100644 index 000000000..8a67ff9b1 --- /dev/null +++ b/libm/math/ilogb/fn.ilogb.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.ilogb.html...

+ + + \ No newline at end of file diff --git a/libm/math/ilogbf/fn.ilogbf.html b/libm/math/ilogbf/fn.ilogbf.html new file mode 100644 index 000000000..09fe1e488 --- /dev/null +++ b/libm/math/ilogbf/fn.ilogbf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.ilogbf.html...

+ + + \ No newline at end of file diff --git a/libm/math/j0/fn.j0.html b/libm/math/j0/fn.j0.html new file mode 100644 index 000000000..7ca55242f --- /dev/null +++ b/libm/math/j0/fn.j0.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.j0.html...

+ + + \ No newline at end of file diff --git a/libm/math/j0/fn.y0.html b/libm/math/j0/fn.y0.html new file mode 100644 index 000000000..0cd9feff8 --- /dev/null +++ b/libm/math/j0/fn.y0.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.y0.html...

+ + + \ No newline at end of file diff --git a/libm/math/j0f/fn.j0f.html b/libm/math/j0f/fn.j0f.html new file mode 100644 index 000000000..b7a33b9eb --- /dev/null +++ b/libm/math/j0f/fn.j0f.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.j0f.html...

+ + + \ No newline at end of file diff --git a/libm/math/j0f/fn.y0f.html b/libm/math/j0f/fn.y0f.html new file mode 100644 index 000000000..b7136067e --- /dev/null +++ b/libm/math/j0f/fn.y0f.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.y0f.html...

+ + + \ No newline at end of file diff --git a/libm/math/j1/fn.j1.html b/libm/math/j1/fn.j1.html new file mode 100644 index 000000000..e7fcf48e9 --- /dev/null +++ b/libm/math/j1/fn.j1.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.j1.html...

+ + + \ No newline at end of file diff --git a/libm/math/j1/fn.y1.html b/libm/math/j1/fn.y1.html new file mode 100644 index 000000000..b9c75a31d --- /dev/null +++ b/libm/math/j1/fn.y1.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.y1.html...

+ + + \ No newline at end of file diff --git a/libm/math/j1f/fn.j1f.html b/libm/math/j1f/fn.j1f.html new file mode 100644 index 000000000..8112c62a2 --- /dev/null +++ b/libm/math/j1f/fn.j1f.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.j1f.html...

+ + + \ No newline at end of file diff --git a/libm/math/j1f/fn.y1f.html b/libm/math/j1f/fn.y1f.html new file mode 100644 index 000000000..3c3f7efdd --- /dev/null +++ b/libm/math/j1f/fn.y1f.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.y1f.html...

+ + + \ No newline at end of file diff --git a/libm/math/jn/fn.jn.html b/libm/math/jn/fn.jn.html new file mode 100644 index 000000000..0de6cf835 --- /dev/null +++ b/libm/math/jn/fn.jn.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.jn.html...

+ + + \ No newline at end of file diff --git a/libm/math/jn/fn.yn.html b/libm/math/jn/fn.yn.html new file mode 100644 index 000000000..71acecbe4 --- /dev/null +++ b/libm/math/jn/fn.yn.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.yn.html...

+ + + \ No newline at end of file diff --git a/libm/math/jnf/fn.jnf.html b/libm/math/jnf/fn.jnf.html new file mode 100644 index 000000000..62ea7e7c0 --- /dev/null +++ b/libm/math/jnf/fn.jnf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.jnf.html...

+ + + \ No newline at end of file diff --git a/libm/math/jnf/fn.ynf.html b/libm/math/jnf/fn.ynf.html new file mode 100644 index 000000000..19b28af8e --- /dev/null +++ b/libm/math/jnf/fn.ynf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.ynf.html...

+ + + \ No newline at end of file diff --git a/libm/math/ldexp/fn.ldexp.html b/libm/math/ldexp/fn.ldexp.html new file mode 100644 index 000000000..a2769dd0d --- /dev/null +++ b/libm/math/ldexp/fn.ldexp.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.ldexp.html...

+ + + \ No newline at end of file diff --git a/libm/math/ldexpf/fn.ldexpf.html b/libm/math/ldexpf/fn.ldexpf.html new file mode 100644 index 000000000..ea5c342c3 --- /dev/null +++ b/libm/math/ldexpf/fn.ldexpf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.ldexpf.html...

+ + + \ No newline at end of file diff --git a/libm/math/lgamma/fn.lgamma.html b/libm/math/lgamma/fn.lgamma.html new file mode 100644 index 000000000..63b67a00c --- /dev/null +++ b/libm/math/lgamma/fn.lgamma.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.lgamma.html...

+ + + \ No newline at end of file diff --git a/libm/math/lgamma_r/fn.lgamma_r.html b/libm/math/lgamma_r/fn.lgamma_r.html new file mode 100644 index 000000000..44e13f37b --- /dev/null +++ b/libm/math/lgamma_r/fn.lgamma_r.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.lgamma_r.html...

+ + + \ No newline at end of file diff --git a/libm/math/lgammaf/fn.lgammaf.html b/libm/math/lgammaf/fn.lgammaf.html new file mode 100644 index 000000000..a839aa5b8 --- /dev/null +++ b/libm/math/lgammaf/fn.lgammaf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.lgammaf.html...

+ + + \ No newline at end of file diff --git a/libm/math/lgammaf_r/fn.lgammaf_r.html b/libm/math/lgammaf_r/fn.lgammaf_r.html new file mode 100644 index 000000000..4eab71ef2 --- /dev/null +++ b/libm/math/lgammaf_r/fn.lgammaf_r.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.lgammaf_r.html...

+ + + \ No newline at end of file diff --git a/libm/math/log/fn.log.html b/libm/math/log/fn.log.html new file mode 100644 index 000000000..eee0c81c3 --- /dev/null +++ b/libm/math/log/fn.log.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.log.html...

+ + + \ No newline at end of file diff --git a/libm/math/log10/fn.log10.html b/libm/math/log10/fn.log10.html new file mode 100644 index 000000000..63cd62ecf --- /dev/null +++ b/libm/math/log10/fn.log10.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.log10.html...

+ + + \ No newline at end of file diff --git a/libm/math/log10f/fn.log10f.html b/libm/math/log10f/fn.log10f.html new file mode 100644 index 000000000..e6b960df9 --- /dev/null +++ b/libm/math/log10f/fn.log10f.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.log10f.html...

+ + + \ No newline at end of file diff --git a/libm/math/log1p/fn.log1p.html b/libm/math/log1p/fn.log1p.html new file mode 100644 index 000000000..451784a5e --- /dev/null +++ b/libm/math/log1p/fn.log1p.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.log1p.html...

+ + + \ No newline at end of file diff --git a/libm/math/log1pf/fn.log1pf.html b/libm/math/log1pf/fn.log1pf.html new file mode 100644 index 000000000..19e6ba377 --- /dev/null +++ b/libm/math/log1pf/fn.log1pf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.log1pf.html...

+ + + \ No newline at end of file diff --git a/libm/math/log2/fn.log2.html b/libm/math/log2/fn.log2.html new file mode 100644 index 000000000..f8ff72c81 --- /dev/null +++ b/libm/math/log2/fn.log2.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.log2.html...

+ + + \ No newline at end of file diff --git a/libm/math/log2f/fn.log2f.html b/libm/math/log2f/fn.log2f.html new file mode 100644 index 000000000..75ea5fe66 --- /dev/null +++ b/libm/math/log2f/fn.log2f.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.log2f.html...

+ + + \ No newline at end of file diff --git a/libm/math/logf/fn.logf.html b/libm/math/logf/fn.logf.html new file mode 100644 index 000000000..5f0cdaf6d --- /dev/null +++ b/libm/math/logf/fn.logf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.logf.html...

+ + + \ No newline at end of file diff --git a/libm/math/modf/fn.modf.html b/libm/math/modf/fn.modf.html new file mode 100644 index 000000000..32da8bd6d --- /dev/null +++ b/libm/math/modf/fn.modf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.modf.html...

+ + + \ No newline at end of file diff --git a/libm/math/modff/fn.modff.html b/libm/math/modff/fn.modff.html new file mode 100644 index 000000000..d85239d52 --- /dev/null +++ b/libm/math/modff/fn.modff.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.modff.html...

+ + + \ No newline at end of file diff --git a/libm/math/pow/fn.pow.html b/libm/math/pow/fn.pow.html new file mode 100644 index 000000000..f19a7b151 --- /dev/null +++ b/libm/math/pow/fn.pow.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.pow.html...

+ + + \ No newline at end of file diff --git a/libm/math/powf/fn.powf.html b/libm/math/powf/fn.powf.html new file mode 100644 index 000000000..515748c82 --- /dev/null +++ b/libm/math/powf/fn.powf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.powf.html...

+ + + \ No newline at end of file diff --git a/libm/math/remquo/fn.remquo.html b/libm/math/remquo/fn.remquo.html new file mode 100644 index 000000000..63182a22e --- /dev/null +++ b/libm/math/remquo/fn.remquo.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.remquo.html...

+ + + \ No newline at end of file diff --git a/libm/math/remquof/fn.remquof.html b/libm/math/remquof/fn.remquof.html new file mode 100644 index 000000000..760c5877b --- /dev/null +++ b/libm/math/remquof/fn.remquof.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.remquof.html...

+ + + \ No newline at end of file diff --git a/libm/math/round/fn.round.html b/libm/math/round/fn.round.html new file mode 100644 index 000000000..2878a2a32 --- /dev/null +++ b/libm/math/round/fn.round.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.round.html...

+ + + \ No newline at end of file diff --git a/libm/math/roundf/fn.roundf.html b/libm/math/roundf/fn.roundf.html new file mode 100644 index 000000000..6b7fb2391 --- /dev/null +++ b/libm/math/roundf/fn.roundf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.roundf.html...

+ + + \ No newline at end of file diff --git a/libm/math/scalbn/fn.scalbn.html b/libm/math/scalbn/fn.scalbn.html new file mode 100644 index 000000000..27b781b6c --- /dev/null +++ b/libm/math/scalbn/fn.scalbn.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.scalbn.html...

+ + + \ No newline at end of file diff --git a/libm/math/scalbnf/fn.scalbnf.html b/libm/math/scalbnf/fn.scalbnf.html new file mode 100644 index 000000000..b8f1f9c37 --- /dev/null +++ b/libm/math/scalbnf/fn.scalbnf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.scalbnf.html...

+ + + \ No newline at end of file diff --git a/libm/math/sin/fn.sin.html b/libm/math/sin/fn.sin.html new file mode 100644 index 000000000..b880a31b1 --- /dev/null +++ b/libm/math/sin/fn.sin.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.sin.html...

+ + + \ No newline at end of file diff --git a/libm/math/sincos/fn.sincos.html b/libm/math/sincos/fn.sincos.html new file mode 100644 index 000000000..604272e08 --- /dev/null +++ b/libm/math/sincos/fn.sincos.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.sincos.html...

+ + + \ No newline at end of file diff --git a/libm/math/sincosf/fn.sincosf.html b/libm/math/sincosf/fn.sincosf.html new file mode 100644 index 000000000..0fccbd439 --- /dev/null +++ b/libm/math/sincosf/fn.sincosf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.sincosf.html...

+ + + \ No newline at end of file diff --git a/libm/math/sinf/fn.sinf.html b/libm/math/sinf/fn.sinf.html new file mode 100644 index 000000000..225a058fe --- /dev/null +++ b/libm/math/sinf/fn.sinf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.sinf.html...

+ + + \ No newline at end of file diff --git a/libm/math/sinh/fn.sinh.html b/libm/math/sinh/fn.sinh.html new file mode 100644 index 000000000..755c9bf90 --- /dev/null +++ b/libm/math/sinh/fn.sinh.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.sinh.html...

+ + + \ No newline at end of file diff --git a/libm/math/sinhf/fn.sinhf.html b/libm/math/sinhf/fn.sinhf.html new file mode 100644 index 000000000..fa62255f8 --- /dev/null +++ b/libm/math/sinhf/fn.sinhf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.sinhf.html...

+ + + \ No newline at end of file diff --git a/libm/math/sqrt/fn.sqrt.html b/libm/math/sqrt/fn.sqrt.html new file mode 100644 index 000000000..11b57912c --- /dev/null +++ b/libm/math/sqrt/fn.sqrt.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.sqrt.html...

+ + + \ No newline at end of file diff --git a/libm/math/sqrtf/fn.sqrtf.html b/libm/math/sqrtf/fn.sqrtf.html new file mode 100644 index 000000000..aa23fac24 --- /dev/null +++ b/libm/math/sqrtf/fn.sqrtf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.sqrtf.html...

+ + + \ No newline at end of file diff --git a/libm/math/tan/fn.tan.html b/libm/math/tan/fn.tan.html new file mode 100644 index 000000000..32620c8cd --- /dev/null +++ b/libm/math/tan/fn.tan.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.tan.html...

+ + + \ No newline at end of file diff --git a/libm/math/tanf/fn.tanf.html b/libm/math/tanf/fn.tanf.html new file mode 100644 index 000000000..a9d01a307 --- /dev/null +++ b/libm/math/tanf/fn.tanf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.tanf.html...

+ + + \ No newline at end of file diff --git a/libm/math/tanh/fn.tanh.html b/libm/math/tanh/fn.tanh.html new file mode 100644 index 000000000..4682b0736 --- /dev/null +++ b/libm/math/tanh/fn.tanh.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.tanh.html...

+ + + \ No newline at end of file diff --git a/libm/math/tanhf/fn.tanhf.html b/libm/math/tanhf/fn.tanhf.html new file mode 100644 index 000000000..fbe37e1d6 --- /dev/null +++ b/libm/math/tanhf/fn.tanhf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.tanhf.html...

+ + + \ No newline at end of file diff --git a/libm/math/tgamma/fn.tgamma.html b/libm/math/tgamma/fn.tgamma.html new file mode 100644 index 000000000..c2dc6616d --- /dev/null +++ b/libm/math/tgamma/fn.tgamma.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.tgamma.html...

+ + + \ No newline at end of file diff --git a/libm/math/tgammaf/fn.tgammaf.html b/libm/math/tgammaf/fn.tgammaf.html new file mode 100644 index 000000000..429ddcd2c --- /dev/null +++ b/libm/math/tgammaf/fn.tgammaf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.tgammaf.html...

+ + + \ No newline at end of file diff --git a/libm/math/trunc/fn.trunc.html b/libm/math/trunc/fn.trunc.html new file mode 100644 index 000000000..e615a5d6b --- /dev/null +++ b/libm/math/trunc/fn.trunc.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.trunc.html...

+ + + \ No newline at end of file diff --git a/libm/math/truncf/fn.truncf.html b/libm/math/truncf/fn.truncf.html new file mode 100644 index 000000000..41859aceb --- /dev/null +++ b/libm/math/truncf/fn.truncf.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../libm/fn.truncf.html...

+ + + \ No newline at end of file diff --git a/libm/sidebar-items.js b/libm/sidebar-items.js new file mode 100644 index 000000000..468bb0edf --- /dev/null +++ b/libm/sidebar-items.js @@ -0,0 +1 @@ +initSidebarItems({"fn":[["acos","Arccosine (f64)"],["acosf","Arccosine (f32)"],["acosh","Inverse hyperbolic cosine (f64)"],["acoshf","Inverse hyperbolic cosine (f32)"],["asin","Arcsine (f64)"],["asinf","Arcsine (f32)"],["asinh","Inverse hyperbolic sine (f64)"],["asinhf","Inverse hyperbolic sine (f32)"],["atan","Arctangent (f64)"],["atan2","Arctangent of y/x (f64)"],["atan2f","Arctangent of y/x (f32)"],["atanf","Arctangent (f32)"],["atanh","Inverse hyperbolic tangent (f64)"],["atanhf","Inverse hyperbolic tangent (f32)"],["cbrt","Computes the cube root of the argument."],["cbrtf","Cube root (f32)"],["ceil","Ceil (f64)"],["ceilf","Ceil (f32)"],["copysign","Sign of Y, magnitude of X (f64)"],["copysignf","Sign of Y, magnitude of X (f32)"],["cos",""],["cosf",""],["cosh","Hyperbolic cosine (f64)"],["coshf","Hyperbolic cosine (f32)"],["erf","Error function (f64)"],["erfc","Complementary error function (f64)"],["erfcf","Complementary error function (f32)"],["erff","Error function (f32)"],["exp","Exponential, base e (f64)"],["exp10",""],["exp10f",""],["exp2","Exponential, base 2 (f64)"],["exp2f","Exponential, base 2 (f32)"],["expf","Exponential, base e (f32)"],["expm1","Exponential, base e, of x-1 (f64)"],["expm1f","Exponential, base e, of x-1 (f32)"],["fabs","Absolute value (magnitude) (f64) Calculates the absolute value (magnitude) of the argument `x`, by direct manipulation of the bit representation of `x`."],["fabsf","Absolute value (magnitude) (f32) Calculates the absolute value (magnitude) of the argument `x`, by direct manipulation of the bit representation of `x`."],["fdim","Positive difference (f64)"],["fdimf","Positive difference (f32)"],["floor","Floor (f64)"],["floorf","Floor (f32)"],["fma","Floating multiply add (f64)"],["fmaf","Floating multiply add (f32)"],["fmax",""],["fmaxf",""],["fmin",""],["fminf",""],["fmod",""],["fmodf",""],["frexp",""],["frexpf",""],["hypot",""],["hypotf",""],["ilogb",""],["ilogbf",""],["j0",""],["j0f",""],["j1",""],["j1f",""],["jn",""],["jnf",""],["ldexp",""],["ldexpf",""],["lgamma",""],["lgamma_r",""],["lgammaf",""],["lgammaf_r",""],["log",""],["log10",""],["log10f",""],["log1p",""],["log1pf",""],["log2",""],["log2f",""],["logf",""],["modf",""],["modff",""],["pow",""],["powf",""],["remquo",""],["remquof",""],["round",""],["roundf",""],["scalbn",""],["scalbnf",""],["sin",""],["sincos",""],["sincosf",""],["sinf",""],["sinh",""],["sinhf",""],["sqrt",""],["sqrtf",""],["tan",""],["tanf",""],["tanh",""],["tanhf",""],["tgamma",""],["tgammaf",""],["trunc",""],["truncf",""],["y0",""],["y0f",""],["y1",""],["y1f",""],["yn",""],["ynf",""]],"trait":[["F32Ext","Math support for `f32`"],["F64Ext","Math support for `f64`"]]}); \ No newline at end of file diff --git a/libm/trait.F32Ext.html b/libm/trait.F32Ext.html new file mode 100644 index 000000000..3f09a3540 --- /dev/null +++ b/libm/trait.F32Ext.html @@ -0,0 +1,44 @@ +libm::F32Ext - Rust

[][src]Trait libm::F32Ext

pub trait F32Ext: Sealed + Sized {
    fn floor(self) -> Self;
    fn ceil(self) -> Self;
    fn round(self) -> Self;
    fn trunc(self) -> Self;
    fn fdim(self, rhs: Self) -> Self;
    fn fract(self) -> Self;
    fn abs(self) -> Self;
    fn mul_add(self, a: Self, b: Self) -> Self;
    fn div_euc(self, rhs: Self) -> Self;
    fn mod_euc(self, rhs: Self) -> Self;
    fn powf(self, n: Self) -> Self;
    fn sqrt(self) -> Self;
    fn exp(self) -> Self;
    fn exp2(self) -> Self;
    fn ln(self) -> Self;
    fn log(self, base: Self) -> Self;
    fn log2(self) -> Self;
    fn log10(self) -> Self;
    fn cbrt(self) -> Self;
    fn hypot(self, other: Self) -> Self;
    fn sin(self) -> Self;
    fn cos(self) -> Self;
    fn tan(self) -> Self;
    fn asin(self) -> Self;
    fn acos(self) -> Self;
    fn atan(self) -> Self;
    fn atan2(self, other: Self) -> Self;
    fn sin_cos(self) -> (Self, Self);
    fn exp_m1(self) -> Self;
    fn ln_1p(self) -> Self;
    fn sinh(self) -> Self;
    fn cosh(self) -> Self;
    fn tanh(self) -> Self;
    fn asinh(self) -> Self;
    fn acosh(self) -> Self;
    fn atanh(self) -> Self;
    fn min(self, other: Self) -> Self;
    fn max(self, other: Self) -> Self;
}

Math support for f32

+

This trait is sealed and cannot be implemented outside of libm.
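A minimal sketch of calling through the trait (the method is named explicitly here; in a no_std build the plain 2.0f32.sqrt() syntax resolves to it as well, since core provides no inherent sqrt):

use libm::F32Ext;

let r = F32Ext::sqrt(2.0f32);
// The trait methods delegate to the corresponding free functions.
assert_eq!(r, libm::sqrtf(2.0));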

+
+

Required methods

fn floor(self) -> Self

fn ceil(self) -> Self

fn round(self) -> Self

fn trunc(self) -> Self

fn fdim(self, rhs: Self) -> Self

fn fract(self) -> Self

fn abs(self) -> Self

fn mul_add(self, a: Self, b: Self) -> Self

fn div_euc(self, rhs: Self) -> Self

fn mod_euc(self, rhs: Self) -> Self

fn powf(self, n: Self) -> Self

fn sqrt(self) -> Self

fn exp(self) -> Self

fn exp2(self) -> Self

fn ln(self) -> Self

fn log(self, base: Self) -> Self

fn log2(self) -> Self

fn log10(self) -> Self

fn cbrt(self) -> Self

fn hypot(self, other: Self) -> Self

fn sin(self) -> Self

fn cos(self) -> Self

fn tan(self) -> Self

fn asin(self) -> Self

fn acos(self) -> Self

fn atan(self) -> Self

fn atan2(self, other: Self) -> Self

fn sin_cos(self) -> (Self, Self)

fn exp_m1(self) -> Self

fn ln_1p(self) -> Self

fn sinh(self) -> Self

fn cosh(self) -> Self

fn tanh(self) -> Self

fn asinh(self) -> Self

fn acosh(self) -> Self

fn atanh(self) -> Self

fn min(self, other: Self) -> Self

fn max(self, other: Self) -> Self


Implementors

impl F32Ext for f32[src]

\ No newline at end of file diff --git a/libm/trait.F64Ext.html b/libm/trait.F64Ext.html new file mode 100644 index 000000000..c6248de85 --- /dev/null +++ b/libm/trait.F64Ext.html @@ -0,0 +1,44 @@ +libm::F64Ext - Rust

[][src]Trait libm::F64Ext

pub trait F64Ext: Sealed + Sized {
    fn floor(self) -> Self;
    fn ceil(self) -> Self;
    fn round(self) -> Self;
    fn trunc(self) -> Self;
    fn fdim(self, rhs: Self) -> Self;
    fn fract(self) -> Self;
    fn abs(self) -> Self;
    fn mul_add(self, a: Self, b: Self) -> Self;
    fn div_euc(self, rhs: Self) -> Self;
    fn mod_euc(self, rhs: Self) -> Self;
    fn powf(self, n: Self) -> Self;
    fn sqrt(self) -> Self;
    fn exp(self) -> Self;
    fn exp2(self) -> Self;
    fn ln(self) -> Self;
    fn log(self, base: Self) -> Self;
    fn log2(self) -> Self;
    fn log10(self) -> Self;
    fn cbrt(self) -> Self;
    fn hypot(self, other: Self) -> Self;
    fn sin(self) -> Self;
    fn cos(self) -> Self;
    fn tan(self) -> Self;
    fn asin(self) -> Self;
    fn acos(self) -> Self;
    fn atan(self) -> Self;
    fn atan2(self, other: Self) -> Self;
    fn sin_cos(self) -> (Self, Self);
    fn exp_m1(self) -> Self;
    fn ln_1p(self) -> Self;
    fn sinh(self) -> Self;
    fn cosh(self) -> Self;
    fn tanh(self) -> Self;
    fn asinh(self) -> Self;
    fn acosh(self) -> Self;
    fn atanh(self) -> Self;
    fn min(self, other: Self) -> Self;
    fn max(self, other: Self) -> Self;
}

Math support for f64

+

This trait is sealed and cannot be implemented outside of libm.
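The f64 counterpart is used the same way; a minimal sketch:

use libm::F64Ext;

// hypot(3, 4) = 5 (allowing a little rounding slack).
assert!((F64Ext::hypot(3.0f64, 4.0) - 5.0).abs() < 1e-12);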

+
+

Required methods

fn floor(self) -> Self

fn ceil(self) -> Self

fn round(self) -> Self

fn trunc(self) -> Self

fn fdim(self, rhs: Self) -> Self

fn fract(self) -> Self

fn abs(self) -> Self

fn mul_add(self, a: Self, b: Self) -> Self

fn div_euc(self, rhs: Self) -> Self

fn mod_euc(self, rhs: Self) -> Self

fn powf(self, n: Self) -> Self

fn sqrt(self) -> Self

fn exp(self) -> Self

fn exp2(self) -> Self

fn ln(self) -> Self

fn log(self, base: Self) -> Self

fn log2(self) -> Self

fn log10(self) -> Self

fn cbrt(self) -> Self

fn hypot(self, other: Self) -> Self

fn sin(self) -> Self

fn cos(self) -> Self

fn tan(self) -> Self

fn asin(self) -> Self

fn acos(self) -> Self

fn atan(self) -> Self

fn atan2(self, other: Self) -> Self

fn sin_cos(self) -> (Self, Self)

fn exp_m1(self) -> Self

fn ln_1p(self) -> Self

fn sinh(self) -> Self

fn cosh(self) -> Self

fn tanh(self) -> Self

fn asinh(self) -> Self

fn acosh(self) -> Self

fn atanh(self) -> Self

fn min(self, other: Self) -> Self

fn max(self, other: Self) -> Self


Implementors

impl F64Ext for f64[src]

Loading content...
\ No newline at end of file diff --git a/light.css b/light.css new file mode 100644 index 000000000..824281ebe --- /dev/null +++ b/light.css @@ -0,0 +1 @@ + body{background-color:white;color:black;}h1,h2,h3:not(.impl):not(.method):not(.type):not(.tymethod),h4:not(.method):not(.type):not(.tymethod){color:black;}h1.fqn{border-bottom-color:#D5D5D5;}h2,h3:not(.impl):not(.method):not(.type):not(.tymethod),h4:not(.method):not(.type):not(.tymethod){border-bottom-color:#DDDDDD;}.in-band{background-color:white;}.invisible{background:rgba(0,0,0,0);}.docblock code,.docblock-short code{background-color:#F5F5F5;}pre{background-color:#F5F5F5;}.sidebar{background-color:#F1F1F1;}*{scrollbar-color:rgba(36,37,39,0.6) #e6e6e6;}.sidebar{scrollbar-color:rgba(36,37,39,0.6) #d9d9d9;}::-webkit-scrollbar-track{background-color:#ecebeb;}::-webkit-scrollbar-thumb{background-color:rgba(36,37,39,0.6);}.sidebar::-webkit-scrollbar-track{background-color:#dcdcdc;}.sidebar::-webkit-scrollbar-thumb{background-color:rgba(36,37,39,0.6);}.sidebar .current{background-color:#fff;}.source .sidebar{background-color:#fff;}.sidebar .location{border-color:#000;background-color:#fff;color:#333;}.sidebar .version{border-bottom-color:#DDD;}.sidebar-title{border-top-color:#777;border-bottom-color:#777;}.block a:hover{background:#F5F5F5;}.line-numbers span{color:#c67e2d;}.line-numbers .line-highlighted{background-color:#f6fdb0 !important;}.docblock h1,.docblock h2,.docblock h3,.docblock h4,.docblock h5{border-bottom-color:#ddd;}.docblock table,.docblock table td,.docblock table th{border-color:#ddd;}.content .method .where,.content .fn .where,.content .where.fmt-newline{color:#4E4C4C;}.content .highlighted{color:#000 !important;background-color:#ccc;}.content .highlighted a,.content .highlighted span{color:#000 !important;}.content .highlighted.trait{background-color:#c7b6ff;}.content .highlighted.traitalias{background-color:#c7b6ff;}.content .highlighted.mod,.content .highlighted.externcrate{background-color:#afc6e4;}.content .highlighted.enum{background-color:#b4d1b9;}.content .highlighted.struct{background-color:#e7b1a0;}.content .highlighted.union{background-color:#b7bd49;}.content .highlighted.fn,.content .highlighted.method,.content .highlighted.tymethod{background-color:#c6afb3;}.content .highlighted.type{background-color:#ffc891;}.content .highlighted.foreigntype{background-color:#f5c4ff;}.content .highlighted.attr,.content .highlighted.derive,.content .highlighted.macro{background-color:#8ce488;}.content .highlighted.constant,.content .highlighted.static{background-color:#c3e0ff;}.content .highlighted.primitive{background-color:#9aecff;}.content .highlighted.keyword{background-color:#f99650;}.content .stability::before{color:#ccc;}.content span.enum,.content a.enum,.block a.current.enum{color:#508157;}.content span.struct,.content a.struct,.block a.current.struct{color:#ad448e;}.content span.type,.content a.type,.block a.current.type{color:#ba5d00;}.content span.foreigntype,.content a.foreigntype,.block a.current.foreigntype{color:#cd00e2;}.content span.attr,.content a.attr,.block a.current.attr,.content span.derive,.content a.derive,.block a.current.derive,.content span.macro,.content a.macro,.block a.current.macro{color:#068000;}.content span.union,.content a.union,.block a.current.union{color:#767b27;}.content span.constant,.content a.constant,.block a.current.constant,.content span.static,.content a.static,.block a.current.static{color:#546e8a;}.content span.primitive,.content a.primitive,.block 
a.current.primitive{color:#2c8093;}.content span.externcrate,.content span.mod,.content a.mod,.block a.current.mod{color:#4d76ae;}.content span.trait,.content a.trait,.block a.current.trait{color:#7c5af3;}.content span.traitalias,.content a.traitalias,.block a.current.traitalias{color:#6841f1;}.content span.fn,.content a.fn,.block a.current.fn,.content span.method,.content a.method,.block a.current.method,.content span.tymethod,.content a.tymethod,.block a.current.tymethod,.content .fnname{color:#9a6e31;}.content span.keyword,.content a.keyword,.block a.current.keyword{color:#de5249;}pre.rust .comment{color:#8E908C;}pre.rust .doccomment{color:#4D4D4C;}nav:not(.sidebar){border-bottom-color:#e0e0e0;}nav.main .current{border-top-color:#000;border-bottom-color:#000;}nav.main .separator{border:1px solid #000;}a{color:#000;}.docblock:not(.type-decl) a:not(.srclink):not(.test-arrow),.docblock-short a:not(.srclink):not(.test-arrow),.stability a{color:#3873AD;}.stab.internal a{color:#304FFE;}a.test-arrow{color:#f5f5f5;}.collapse-toggle{color:#999;}#crate-search{color:#555;background-color:white;border-color:#e0e0e0;box-shadow:0 0 0 1px #e0e0e0,0 0 0 2px transparent;}.search-input{color:#555;background-color:white;box-shadow:0 0 0 1px #e0e0e0,0 0 0 2px transparent;}.search-input:focus{border-color:#66afe9;}.search-focus:disabled{background-color:#e6e6e6;}#crate-search+.search-input:focus{box-shadow:0 0 8px #078dd8;}.module-item .stab{color:#000;}.stab.unstable{background:#FFF5D6;border-color:#FFC600;}.stab.internal{background:#FFB9B3;border-color:#B71C1C;}.stab.deprecated{background:#F3DFFF;border-color:#7F0087;}.stab.portability{background:#C4ECFF;border-color:#7BA5DB;}.stab.portability>code{color:#000;}#help>div{background:#e9e9e9;border-color:#bfbfbf;}.since{color:grey;}tr.result span.primitive::after,tr.result span.keyword::after{color:black;}.line-numbers :target{background-color:transparent;}pre.rust .kw{color:#8959A8;}pre.rust .kw-2,pre.rust .prelude-ty{color:#4271AE;}pre.rust .number,pre.rust .string{color:#718C00;}pre.rust .self,pre.rust .bool-val,pre.rust .prelude-val,pre.rust .attribute,pre.rust .attribute .ident{color:#C82829;}pre.rust .macro,pre.rust .macro-nonterminal{color:#3E999F;}pre.rust .lifetime{color:#B76514;}pre.rust .question-mark{color:#ff9011;}.example-wrap>pre.line-number{border-color:#c7c7c7;}a.test-arrow{background-color:rgba(78,139,202,0.2);}a.test-arrow:hover{background-color:#4e8bca;}.toggle-label{color:#999;}:target>code,:target>.in-band{background:#FDFFD3;}pre.compile_fail{border-left:2px solid rgba(255,0,0,.5);}pre.compile_fail:hover,.information:hover+pre.compile_fail{border-left:2px solid #f00;}pre.should_panic{border-left:2px solid rgba(255,0,0,.5);}pre.should_panic:hover,.information:hover+pre.should_panic{border-left:2px solid #f00;}pre.ignore{border-left:2px solid rgba(255,142,0,.6);}pre.ignore:hover,.information:hover+pre.ignore{border-left:2px solid #ff9200;}.tooltip.compile_fail{color:rgba(255,0,0,.5);}.information>.compile_fail:hover{color:#f00;}.tooltip.should_panic{color:rgba(255,0,0,.5);}.information>.should_panic:hover{color:#f00;}.tooltip.ignore{color:rgba(255,142,0,.6);}.information>.ignore:hover{color:#ff9200;}.search-failed a{color:#0089ff;}.tooltip .tooltiptext{background-color:#000;color:#fff;}.tooltip .tooltiptext::after{border-color:transparent black transparent 
transparent;}#titles>div:not(.selected){background-color:#e6e6e6;border-top-color:#e6e6e6;}#titles>div:hover,#titles>div.selected{border-top-color:#0089ff;}#titles>div>div.count{color:#888;}@media (max-width:700px){.sidebar-menu{background-color:#F1F1F1;border-bottom-color:#e0e0e0;border-right-color:#e0e0e0;}.sidebar-elems{background-color:#F1F1F1;border-right-color:#000;}#sidebar-filler{background-color:#F1F1F1;border-bottom-color:#e0e0e0;}}kbd{color:#000;background-color:#fafbfc;border-color:#d1d5da;border-bottom-color:#c6cbd1;box-shadow-color:#c6cbd1;}#theme-picker,#settings-menu{border-color:#e0e0e0;background-color:#fff;}#theme-picker:hover,#theme-picker:focus,#settings-menu:hover,#settings-menu:focus{border-color:#717171;}#theme-choices{border-color:#ccc;background-color:#fff;}#theme-choices>button:not(:first-child){border-top-color:#e0e0e0;}#theme-choices>button:hover,#theme-choices>button:focus{background-color:#eee;}@media (max-width:700px){#theme-picker{background:#fff;}}#all-types{background-color:#fff;}#all-types:hover{background-color:#f9f9f9;}.search-results td span.alias{color:#000;}.search-results td span.grey{color:#999;}#sidebar-toggle{background-color:#F1F1F1;}#sidebar-toggle:hover{background-color:#E0E0E0;}#source-sidebar{background-color:#F1F1F1;}#source-sidebar>.title{border-bottom-color:#ccc;}div.files>a:hover,div.name:hover{background-color:#E0E0E0;}div.files>.selected{background-color:#fff;}.setting-line>.title{border-bottom-color:#D5D5D5;} \ No newline at end of file diff --git a/main.js b/main.js new file mode 100644 index 000000000..9dec30518 --- /dev/null +++ b/main.js @@ -0,0 +1,7 @@ +if(!String.prototype.startsWith){String.prototype.startsWith=function(searchString,position){position=position||0;return this.indexOf(searchString,position)===position}}if(!String.prototype.endsWith){String.prototype.endsWith=function(suffix,length){var l=length||this.length;return this.indexOf(suffix,l-suffix.length)!==-1}}if(!DOMTokenList.prototype.add){DOMTokenList.prototype.add=function(className){if(className&&!hasClass(this,className)){if(this.className&&this.className.length>0){this.className+=" "+className}else{this.className=className}}}}if(!DOMTokenList.prototype.remove){DOMTokenList.prototype.remove=function(className){if(className&&this.className){this.className=(" "+this.className+" ").replace(" "+className+" "," ").trim()}}}function getSearchInput(){return document.getElementsByClassName("search-input")[0]}function getSearchElement(){return document.getElementById("search")}function focusSearchBar(){getSearchInput().focus()}function defocusSearchBar(){getSearchInput().blur()}(function(){"use strict";var itemTypes=["mod","externcrate","import","struct","enum","fn","type","static","trait","impl","tymethod","method","structfield","variant","macro","primitive","associatedtype","constant","associatedconstant","union","foreigntype","keyword","existential","attr","derive","traitalias"];var disableShortcuts=getCurrentValue("rustdoc-disable-shortcuts")==="true";var search_input=getSearchInput();var searchTimeout=null;var currentTab=0;var mouseMovedAfterSearch=true;var titleBeforeSearch=document.title;function clearInputTimeout(){if(searchTimeout!==null){clearTimeout(searchTimeout);searchTimeout=null}}function getPageId(){var id=document.location.href.split("#")[1];if(id){return id.split("?")[0].split("&")[0]}return null}function showSidebar(){var elems=document.getElementsByClassName("sidebar-elems")[0];if(elems){addClass(elems,"show-it")}var 
sidebar=document.getElementsByClassName("sidebar")[0];if(sidebar){addClass(sidebar,"mobile");var filler=document.getElementById("sidebar-filler");if(!filler){var div=document.createElement("div");div.id="sidebar-filler";sidebar.appendChild(div)}}var themePickers=document.getElementsByClassName("theme-picker");if(themePickers&&themePickers.length>0){themePickers[0].style.display="none"}}function hideSidebar(){var elems=document.getElementsByClassName("sidebar-elems")[0];if(elems){removeClass(elems,"show-it")}var sidebar=document.getElementsByClassName("sidebar")[0];removeClass(sidebar,"mobile");var filler=document.getElementById("sidebar-filler");if(filler){filler.remove()}document.getElementsByTagName("body")[0].style.marginTop="";var themePickers=document.getElementsByClassName("theme-picker");if(themePickers&&themePickers.length>0){themePickers[0].style.display=null}}function showSearchResults(search){if(search===null||typeof search==='undefined'){search=getSearchElement()}addClass(main,"hidden");removeClass(search,"hidden");mouseMovedAfterSearch=false}function hideSearchResults(search){if(search===null||typeof search==='undefined'){search=getSearchElement()}addClass(search,"hidden");removeClass(main,"hidden")}var TY_PRIMITIVE=itemTypes.indexOf("primitive");var TY_KEYWORD=itemTypes.indexOf("keyword");function getQueryStringParams(){var params={};window.location.search.substring(1).split("&").map(function(s){var pair=s.split("=");params[decodeURIComponent(pair[0])]=typeof pair[1]==="undefined"?null:decodeURIComponent(pair[1])});return params}function browserSupportsHistoryApi(){return window.history&&typeof window.history.pushState==="function"}function isHidden(elem){return elem.offsetHeight===0}var main=document.getElementById("main");var savedHash="";function handleHashes(ev){var elem;var search=getSearchElement();if(ev!==null&&search&&!hasClass(search,"hidden")&&ev.newURL){hideSearchResults(search);var hash=ev.newURL.slice(ev.newURL.indexOf("#")+1);if(browserSupportsHistoryApi()){history.replaceState(hash,"","?search=#"+hash)}elem=document.getElementById(hash);if(elem){elem.scrollIntoView()}}if(savedHash!==window.location.hash){savedHash=window.location.hash;if(savedHash.length===0){return}elem=document.getElementById(savedHash.slice(1));if(!elem||!isHidden(elem)){return}var parent=elem.parentNode;if(parent&&hasClass(parent,"impl-items")){onEachLazy(parent.getElementsByClassName("collapsed"),function(e){if(e.parentNode===parent){e.click();return true}});if(isHidden(elem)){if(hasClass(parent.lastElementChild,"collapse-toggle")){parent.lastElementChild.click()}}}}}function highlightSourceLines(match,ev){if(typeof match==="undefined"){hideSidebar();match=window.location.hash.match(/^#?(\d+)(?:-(\d+))?$/)}if(!match){return}var from=parseInt(match[1],10);var to=from;if(typeof match[2]!=="undefined"){to=parseInt(match[2],10)}if(to0){collapseDocs(collapses[0],"show")}}}}function getVirtualKey(ev){if("key"in ev&&typeof ev.key!="undefined"){return ev.key}var c=ev.charCode||ev.keyCode;if(c==27){return"Escape"}return String.fromCharCode(c)}function getHelpElement(){return document.getElementById("help")}function displayHelp(display,ev,help){help=help?help:getHelpElement();if(display===true){if(hasClass(help,"hidden")){ev.preventDefault();removeClass(help,"hidden");addClass(document.body,"blur")}}else if(hasClass(help,"hidden")===false){ev.preventDefault();addClass(help,"hidden");removeClass(document.body,"blur")}}function handleEscape(ev){var help=getHelpElement();var 
search=getSearchElement();if(hasClass(help,"hidden")===false){displayHelp(false,ev,help)}else if(hasClass(search,"hidden")===false){clearInputTimeout();ev.preventDefault();hideSearchResults(search);document.title=titleBeforeSearch}defocusSearchBar()}function handleShortcut(ev){if(ev.ctrlKey||ev.altKey||ev.metaKey||disableShortcuts===true){return}if(document.activeElement.tagName==="INPUT"){switch(getVirtualKey(ev)){case"Escape":handleEscape(ev);break}}else{switch(getVirtualKey(ev)){case"Escape":handleEscape(ev);break;case"s":case"S":displayHelp(false,ev);ev.preventDefault();focusSearchBar();break;case"+":case"-":ev.preventDefault();toggleAllDocs();break;case"?":if(ev.shiftKey){displayHelp(true,ev)}break}}}function findParentElement(elem,tagName){do{if(elem&&elem.tagName===tagName){return elem}elem=elem.parentNode}while(elem);return null}document.addEventListener("keypress",handleShortcut);document.addEventListener("keydown",handleShortcut);function resetMouseMoved(ev){mouseMovedAfterSearch=true}document.addEventListener("mousemove",resetMouseMoved);var handleSourceHighlight=(function(){var prev_line_id=0;var set_fragment=function(name){var x=window.scrollX,y=window.scrollY;if(browserSupportsHistoryApi()){history.replaceState(null,null,"#"+name);highlightSourceLines()}else{location.replace("#"+name)}window.scrollTo(x,y)};return function(ev){var cur_line_id=parseInt(ev.target.id,10);ev.preventDefault();if(ev.shiftKey&&prev_line_id){if(prev_line_id>cur_line_id){var tmp=prev_line_id;prev_line_id=cur_line_id;cur_line_id=tmp}set_fragment(prev_line_id+"-"+cur_line_id)}else{prev_line_id=cur_line_id;set_fragment(cur_line_id)}}}());document.addEventListener("click",function(ev){if(hasClass(ev.target,"collapse-toggle")){collapseDocs(ev.target,"toggle")}else if(hasClass(ev.target.parentNode,"collapse-toggle")){collapseDocs(ev.target.parentNode,"toggle")}else if(ev.target.tagName==="SPAN"&&hasClass(ev.target.parentNode,"line-numbers")){handleSourceHighlight(ev)}else if(hasClass(getHelpElement(),"hidden")===false){var help=getHelpElement();var is_inside_help_popup=ev.target!==help&&help.contains(ev.target);if(is_inside_help_popup===false){addClass(help,"hidden");removeClass(document.body,"blur")}}else{var a=findParentElement(ev.target,"A");if(a&&a.hash){expandSection(a.hash.replace(/^#/,""))}}});(function(){var x=document.getElementsByClassName("version-selector");if(x.length>0){x[0].onchange=function(){var i,match,url=document.location.href,stripped="",len=rootPath.match(/\.\.\//g).length+1;for(i=0;i-1){var obj=searchIndex[results[i].id];obj.lev=results[i].lev;if(isType!==true||obj.type){var res=buildHrefAndPath(obj);obj.displayPath=pathSplitter(res[0]);obj.fullPath=obj.displayPath+obj.name;obj.fullPath+="|"+obj.ty;obj.href=res[1];out.push(obj);if(out.length>=MAX_RESULTS){break}}}}return out}function sortResults(results,isType){var ar=[];for(var entry in results){if(hasOwnProperty(results,entry)){ar.push(results[entry])}}results=ar;var i;var nresults=results.length;for(i=0;ib?+1:-1)}a=(aaa.index<0);b=(bbb.index<0);if(a!==b){return a-b}a=aaa.index;b=bbb.index;if(a!==b){return a-b}if((aaa.item.ty===TY_PRIMITIVE&&bbb.item.ty!==TY_KEYWORD)||(aaa.item.ty===TY_KEYWORD&&bbb.item.ty!==TY_PRIMITIVE)){return-1}if((bbb.item.ty===TY_PRIMITIVE&&aaa.item.ty!==TY_PRIMITIVE)||(bbb.item.ty===TY_KEYWORD&&aaa.item.ty!==TY_KEYWORD)){return 1}a=(aaa.item.desc==="");b=(bbb.item.desc==="");if(a!==b){return a-b}a=aaa.item.ty;b=bbb.item.ty;if(a!==b){return a-b}a=aaa.item.path;b=bbb.item.path;if(a!==b){return(a>b?+1:-1)}return 
0});var length=results.length;for(i=0;i"));return{name:val.substring(0,val.indexOf("<")),generics:values.split(/\s*,\s*/),}}return{name:val,generics:[],}}function getObjectFromId(id){if(typeof id==="number"){return searchIndex[id]}return{'name':id}}function checkGenerics(obj,val){var lev_distance=MAX_LEV_DISTANCE+1;if(val.generics.length>0){if(obj.length>GENERICS_DATA&&obj[GENERICS_DATA].length>=val.generics.length){var elems=obj[GENERICS_DATA].slice(0);var total=0;var done=0;var vlength=val.generics.length;for(var y=0;yGENERICS_DATA&&obj[GENERICS_DATA].length>=val.generics.length){var elems=obj[GENERICS_DATA].slice(0);var allFound=true;for(var y=0;allFound===true&&yGENERICS_DATA&&obj[GENERICS_DATA].length!==0){var tmp_lev=checkGenerics(obj,val);if(tmp_lev<=MAX_LEV_DISTANCE){return tmp_lev}}else{return 0}}if(literalSearch===true){if(obj.length>GENERICS_DATA&&obj[GENERICS_DATA].length>0){var length=obj[GENERICS_DATA].length;for(x=0;xGENERICS_DATA&&obj[GENERICS_DATA].length>0){var olength=obj[GENERICS_DATA].length;for(x=0;x0){var length=obj.type[INPUTS_DATA].length;for(var i=0;iOUTPUT_DATA){var ret=obj.type[OUTPUT_DATA];if(typeof ret[0]==="string"){ret=[ret]}for(var x=0;xlength){return MAX_LEV_DISTANCE+1}for(var i=0;ilength){break}var lev_total=0;var aborted=false;for(var x=0;xMAX_LEV_DISTANCE){aborted=true;break}lev_total+=lev}if(aborted===false){ret_lev=Math.min(ret_lev,Math.round(lev_total/clength))}}return ret_lev}function typePassesFilter(filter,type){if(filter<=NO_TYPE_FILTER)return true;if(filter===type)return true;var name=itemTypes[type];switch(itemTypes[filter]){case"constant":return name==="associatedconstant";case"fn":return name==="method"||name==="tymethod";case"type":return name==="primitive"||name==="associatedtype";case"trait":return name==="traitalias"}return false}function generateId(ty){if(ty.parent&&ty.parent.name){return itemTypes[ty.ty]+ty.path+ty.parent.name+ty.name}return itemTypes[ty.ty]+ty.path+ty.name}function createAliasFromItem(item){return{crate:item.crate,name:item.name,path:item.path,desc:item.desc,ty:item.ty,parent:item.parent,type:item.type,is_alias:true,}}function handleAliases(ret,query,filterCrates){var aliases=[];var crateAliases=[];var i;if(filterCrates!==undefined){if(ALIASES[filterCrates]&&ALIASES[filterCrates][query.search]){for(i=0;iMAX_RESULTS){ret.others.pop()}};onEach(aliases,pushFunc);onEach(crateAliases,pushFunc)}var nSearchWords=searchWords.length;var i;var ty;var fullId;var returned;var in_args;if((val.charAt(0)==="\""||val.charAt(0)==="'")&&val.charAt(val.length-1)===val.charAt(0)){val=extractGenerics(val.substr(1,val.length-2));for(i=0;i")>-1){var trimmer=function(s){return s.trim()};var parts=val.split("->").map(trimmer);var input=parts[0];var inputs=input.split(",").map(trimmer).sort();for(i=0;i1?paths.length-1:1);var lev;for(j=0;j1){lev=checkPath(contains,paths[paths.length-1],ty);if(lev>MAX_LEV_DISTANCE){continue}else if(lev>0){lev_add=lev/10}}returned=MAX_LEV_DISTANCE+1;in_args=MAX_LEV_DISTANCE+1;var 
index=-1;lev=MAX_LEV_DISTANCE+1;fullId=generateId(ty);if(searchWords[j].indexOf(split[i])>-1||searchWords[j].indexOf(val)>-1||searchWords[j].replace(/_/g,"").indexOf(val)>-1){if(typePassesFilter(typeFilter,ty.ty)&&results[fullId]===undefined){index=searchWords[j].replace(/_/g,"").indexOf(val)}}if((lev=levenshtein(searchWords[j],val))<=MAX_LEV_DISTANCE){if(typePassesFilter(typeFilter,ty.ty)===false){lev=MAX_LEV_DISTANCE+1}else{lev+=1}}in_args=findArg(ty,valGenerics,false,typeFilter);returned=checkReturned(ty,valGenerics,false,typeFilter);lev+=lev_add;if(lev>0&&val.length>3&&searchWords[j].indexOf(val)>-1){if(val.length<6){lev-=1}else{lev=0}}if(in_args<=MAX_LEV_DISTANCE){if(results_in_args[fullId]===undefined){results_in_args[fullId]={id:j,index:index,lev:in_args,}}results_in_args[fullId].lev=Math.min(results_in_args[fullId].lev,in_args)}if(returned<=MAX_LEV_DISTANCE){if(results_returned[fullId]===undefined){results_returned[fullId]={id:j,index:index,lev:returned,}}results_returned[fullId].lev=Math.min(results_returned[fullId].lev,returned)}if(index!==-1||lev<=MAX_LEV_DISTANCE){if(index!==-1&&paths.length<2){lev=0}if(results[fullId]===undefined){results[fullId]={id:j,index:index,lev:lev,}}results[fullId].lev=Math.min(results[fullId].lev,lev)}}}var ret={"in_args":sortResults(results_in_args,true),"returned":sortResults(results_returned,true),"others":sortResults(results),};handleAliases(ret,query,filterCrates);return ret}function validateResult(name,path,keys,parent){for(var i=0;i-1||path.indexOf(keys[i])>-1||(parent!==undefined&&parent.name!==undefined&&parent.name.toLowerCase().indexOf(keys[i])>-1)||levenshtein(name,keys[i])<=MAX_LEV_DISTANCE)){return false}}return true}function getQuery(raw){var matches,type,query;query=raw;matches=query.match(/^(fn|mod|struct|enum|trait|type|const|macro)\s*:\s*/i);if(matches){type=matches[1].replace(/^const$/,"constant");query=query.substring(matches[0].length)}return{raw:raw,query:query,type:type,id:query+type}}function initSearchNav(){var hoverTimeout;var click_func=function(e){var el=e.target;while(el.tagName!=="TR"){el=el.parentNode}var dst=e.target.getElementsByTagName("a");if(dst.length<1){return}dst=dst[0];if(window.location.pathname===dst.pathname){hideSearchResults();document.location.href=dst.href}};var mouseover_func=function(e){if(mouseMovedAfterSearch){var el=e.target;while(el.tagName!=="TR"){el=el.parentNode}clearTimeout(hoverTimeout);hoverTimeout=setTimeout(function(){onEachLazy(document.getElementsByClassName("search-results"),function(e){onEachLazy(e.getElementsByClassName("result"),function(i_e){removeClass(i_e,"highlighted")})});addClass(el,"highlighted")},20)}};onEachLazy(document.getElementsByClassName("search-results"),function(e){onEachLazy(e.getElementsByClassName("result"),function(i_e){i_e.onclick=click_func;i_e.onmouseover=mouseover_func})});search_input.onkeydown=function(e){var actives=[[],[],[]];var current=0;onEachLazy(document.getElementById("results").childNodes,function(e){onEachLazy(e.getElementsByClassName("highlighted"),function(e){actives[current].push(e)});current+=1});if(e.which===38){if(!actives[currentTab].length||!actives[currentTab][0].previousElementSibling){return}addClass(actives[currentTab][0].previousElementSibling,"highlighted");removeClass(actives[currentTab][0],"highlighted");e.preventDefault()}else if(e.which===40){if(!actives[currentTab].length){var results=document.getElementById("results").childNodes;if(results.length>0){var 
res=results[currentTab].getElementsByClassName("result");if(res.length>0){addClass(res[0],"highlighted")}}}else if(actives[currentTab][0].nextElementSibling){addClass(actives[currentTab][0].nextElementSibling,"highlighted");removeClass(actives[currentTab][0],"highlighted")}e.preventDefault()}else if(e.which===13){if(actives[currentTab].length){document.location.href=actives[currentTab][0].getElementsByTagName("a")[0].href}}else if(e.which===9){if(e.shiftKey){printTab(currentTab>0?currentTab-1:2)}else{printTab(currentTab>1?0:currentTab+1)}e.preventDefault()}else if(e.which===16){}else if(actives[currentTab].length>0){removeClass(actives[currentTab][0],"highlighted")}}}function buildHrefAndPath(item){var displayPath;var href;var type=itemTypes[item.ty];var name=item.name;var path=item.path;if(type==="mod"){displayPath=path+"::";href=rootPath+path.replace(/::/g,"/")+"/"+name+"/index.html"}else if(type==="primitive"||type==="keyword"){displayPath="";href=rootPath+path.replace(/::/g,"/")+"/"+type+"."+name+".html"}else if(type==="externcrate"){displayPath="";href=rootPath+name+"/index.html"}else if(item.parent!==undefined){var myparent=item.parent;var anchor="#"+type+"."+name;var parentType=itemTypes[myparent.ty];var pageType=parentType;var pageName=myparent.name;if(parentType==="primitive"){displayPath=myparent.name+"::"}else if(type==="structfield"&&parentType==="variant"){var splitPath=item.path.split("::");var enumName=splitPath.pop();path=splitPath.join("::");displayPath=path+"::"+enumName+"::"+myparent.name+"::";anchor="#variant."+myparent.name+".field."+name;pageType="enum";pageName=enumName}else{displayPath=path+"::"+myparent.name+"::"}href=rootPath+path.replace(/::/g,"/")+"/"+pageType+"."+pageName+".html"+anchor}else{displayPath=item.path+"::";href=rootPath+item.path.replace(/::/g,"/")+"/"+type+"."+name+".html"}return[displayPath,href]}function escape(content){var h1=document.createElement("h1");h1.textContent=content;return h1.innerHTML}function pathSplitter(path){var tmp=""+path.replace(/::/g,"::");if(tmp.endsWith("")){return tmp.slice(0,tmp.length-6)}return tmp}function addTab(array,query,display){var extraStyle="";if(display===false){extraStyle=" style=\"display: none;\""}var output="";var duplicates={};var length=0;if(array.length>0){output="";array.forEach(function(item){var name,type;name=item.name;type=itemTypes[item.ty];if(item.is_alias!==true){if(duplicates[item.fullPath]){return}duplicates[item.fullPath]=true}length+=1;output+=""});output+="
"+""+(item.is_alias===true?(""+item.alias+"  - see "):"")+item.displayPath+""+name+""+""+""+escape(item.desc)+" 
"}else{output="
No results :(
"+"Try on DuckDuckGo?

"+"Or try looking in one of these:
"}return[output,length]}function makeTabHeader(tabNb,text,nbElems){if(currentTab===tabNb){return"
"+text+"
("+nbElems+")
"}return"
"+text+"
("+nbElems+")
"}function showResults(results){if(results.others.length===1&&getCurrentValue("rustdoc-go-to-only-result")==="true"){var elem=document.createElement("a");elem.href=results.others[0].href;elem.style.display="none";document.body.appendChild(elem);elem.click()}var query=getQuery(search_input.value);currentResults=query.id;var ret_others=addTab(results.others,query);var ret_in_args=addTab(results.in_args,query,false);var ret_returned=addTab(results.returned,query,false);var output="

Results for "+escape(query.query)+(query.type?" (type: "+escape(query.type)+")":"")+"

"+"
"+makeTabHeader(0,"In Names",ret_others[1])+makeTabHeader(1,"In Parameters",ret_in_args[1])+makeTabHeader(2,"In Return Types",ret_returned[1])+"
"+ret_others[0]+ret_in_args[0]+ret_returned[0]+"
";var search=getSearchElement();search.innerHTML=output;showSearchResults(search);var tds=search.getElementsByTagName("td");var td_width=0;if(tds.length>0){td_width=tds[0].offsetWidth}var width=search.offsetWidth-40-td_width;onEachLazy(search.getElementsByClassName("desc"),function(e){e.style.width=width+"px"});initSearchNav();var elems=document.getElementById("titles").childNodes;elems[0].onclick=function(){printTab(0)};elems[1].onclick=function(){printTab(1)};elems[2].onclick=function(){printTab(2)};printTab(currentTab)}function execSearch(query,searchWords,filterCrates){function getSmallest(arrays,positions,notDuplicates){var start=null;for(var it=0;itpositions[it]&&(start===null||start>arrays[it][positions[it]].lev)&&!notDuplicates[arrays[it][positions[it]].fullPath]){start=arrays[it][positions[it]].lev}}return start}function mergeArrays(arrays){var ret=[];var positions=[];var notDuplicates={};for(var x=0;xpositions[x]&&arrays[x][positions[x]].lev===smallest&&!notDuplicates[arrays[x][positions[x]].fullPath]){ret.push(arrays[x][positions[x]]);notDuplicates[arrays[x][positions[x]].fullPath]=true;positions[x]+=1}}}return ret}var queries=query.raw.split(",");var results={"in_args":[],"returned":[],"others":[],};for(var i=0;i1){return{"in_args":mergeArrays(results.in_args),"returned":mergeArrays(results.returned),"others":mergeArrays(results.others),}}return{"in_args":results.in_args[0],"returned":results.returned[0],"others":results.others[0],}}function getFilterCrates(){var elem=document.getElementById("crate-search");if(elem&&elem.value!=="All crates"&&hasOwnProperty(rawSearchIndex,elem.value)){return elem.value}return undefined}function search(e,forced){var params=getQueryStringParams();var query=getQuery(search_input.value.trim());if(e){e.preventDefault()}if(query.query.length===0){return}if(forced!==true&&query.id===currentResults){if(query.query.length>0){putBackSearch(search_input)}return}document.title="Results for "+query.query+" - Rust";if(browserSupportsHistoryApi()){if(!history.state&&!params.search){history.pushState(query,"","?search="+encodeURIComponent(query.raw))}else{history.replaceState(query,"","?search="+encodeURIComponent(query.raw))}}var filterCrates=getFilterCrates();showResults(execSearch(query,index,filterCrates))}function buildIndex(rawSearchIndex){searchIndex=[];var searchWords=[];var i;var currentIndex=0;for(var crate in rawSearchIndex){if(!hasOwnProperty(rawSearchIndex,crate)){continue}var crateSize=0;searchWords.push(crate);searchIndex.push({crate:crate,ty:1,name:crate,path:"",desc:rawSearchIndex[crate].doc,type:null,});currentIndex+=1;var items=rawSearchIndex[crate].i;var paths=rawSearchIndex[crate].p;var aliases=rawSearchIndex[crate].a;var len=paths.length;for(i=0;i0){search_input.value=params.search;search(e)}else{search_input.value="";hideSearchResults()}})}search()}index=buildIndex(rawSearchIndex);startSearch();if(rootPath==="../"||rootPath==="./"){var sidebar=document.getElementsByClassName("sidebar-elems")[0];if(sidebar){var div=document.createElement("div");div.className="block crate";div.innerHTML="

Crates

";var ul=document.createElement("ul");div.appendChild(ul);var crates=[];for(var crate in rawSearchIndex){if(!hasOwnProperty(rawSearchIndex,crate)){continue}crates.push(crate)}crates.sort();for(var i=0;i"+""+"
"+code.outerHTML+"
";list.appendChild(display)}}};if(window.pending_implementors){window.register_implementors(window.pending_implementors)}function labelForToggleButton(sectionIsCollapsed){if(sectionIsCollapsed){return"+"}return"\u2212"}function onEveryMatchingChild(elem,className,func){if(elem&&className&&func){var length=elem.childNodes.length;var nodes=elem.childNodes;for(var i=0;i"+labelForToggleButton(sectionIsCollapsed)+"
]";return toggle}function createToggle(toggle,otherMessage,fontSize,extraClass,show){var span=document.createElement("span");span.className="toggle-label";if(show){span.style.display="none"}if(!otherMessage){span.innerHTML=" Expand description"}else{span.innerHTML=otherMessage}if(fontSize){span.style.fontSize=fontSize}var mainToggle=toggle.cloneNode(true);mainToggle.appendChild(span);var wrapper=document.createElement("div");wrapper.className="toggle-wrapper";if(!show){addClass(wrapper,"collapsed");var inner=mainToggle.getElementsByClassName("inner");if(inner&&inner.length>0){inner[0].innerHTML="+"}}if(extraClass){addClass(wrapper,extraClass)}wrapper.appendChild(mainToggle);return wrapper}(function(){var toggle=createSimpleToggle(false);var hideMethodDocs=getCurrentValue("rustdoc-auto-hide-method-docs")==="true";var pageId=getPageId();var func=function(e){var next=e.nextElementSibling;if(!next){return}if(hasClass(next,"docblock")===true||(hasClass(next,"stability")===true&&hasClass(next.nextElementSibling,"docblock")===true)){var newToggle=toggle.cloneNode(true);insertAfter(newToggle,e.childNodes[e.childNodes.length-1]);if(hideMethodDocs===true&&hasClass(e,"method")===true){collapseDocs(newToggle,"hide",pageId)}}};var funcImpl=function(e){var next=e.nextElementSibling;if(next&&hasClass(next,"docblock")){next=next.nextElementSibling}if(!next){return}if(hasClass(e,"impl")&&(next.getElementsByClassName("method").length>0||next.getElementsByClassName("associatedconstant").length>0)){insertAfter(toggle.cloneNode(true),e.childNodes[e.childNodes.length-1])}};onEachLazy(document.getElementsByClassName("method"),func);onEachLazy(document.getElementsByClassName("associatedconstant"),func);onEachLazy(document.getElementsByClassName("impl"),funcImpl);var impl_call=function(){};if(hideMethodDocs===true){impl_call=function(e,newToggle){if(e.id.match(/^impl(?:-\d+)?$/)===null){if(hasClass(e,"impl")===true){collapseDocs(newToggle,"hide",pageId)}}}}var newToggle=document.createElement("a");newToggle.href="javascript:void(0)";newToggle.className="collapse-toggle hidden-default collapsed";newToggle.innerHTML="["+labelForToggleButton(true)+"] Show hidden undocumented items";function toggleClicked(){if(hasClass(this,"collapsed")){removeClass(this,"collapsed");onEachLazy(this.parentNode.getElementsByClassName("hidden"),function(x){if(hasClass(x,"content")===false){removeClass(x,"hidden");addClass(x,"x")}},true);this.innerHTML="["+labelForToggleButton(false)+"] Hide undocumented items"}else{addClass(this,"collapsed");onEachLazy(this.parentNode.getElementsByClassName("x"),function(x){if(hasClass(x,"content")===false){addClass(x,"hidden");removeClass(x,"x")}},true);this.innerHTML="["+labelForToggleButton(true)+"] Show hidden undocumented items"}}onEachLazy(document.getElementsByClassName("impl-items"),function(e){onEachLazy(e.getElementsByClassName("associatedconstant"),func);var hiddenElems=e.getElementsByClassName("hidden");var needToggle=false;var hlength=hiddenElems.length;for(var i=0;iLoading search results...";showSearchResults(search)}var sidebar_menu=document.getElementsByClassName("sidebar-menu")[0];if(sidebar_menu){sidebar_menu.onclick=function(){var 
sidebar=document.getElementsByClassName("sidebar")[0];if(hasClass(sidebar,"mobile")===true){hideSidebar()}else{showSidebar()}}}window.onresize=function(){hideSidebar()};autoCollapse(getPageId(),getCurrentValue("rustdoc-collapse")==="true");if(window.location.hash&&window.location.hash.length>0){expandSection(window.location.hash.replace(/^#/,""))}if(main){onEachLazy(main.getElementsByClassName("loading-content"),function(e){e.remove()});onEachLazy(main.childNodes,function(e){if(e.tagName==="H2"||e.tagName==="H3"){var nextTagName=e.nextElementSibling.tagName;if(nextTagName=="H2"||nextTagName=="H3"){e.nextElementSibling.style.display="flex"}else{e.nextElementSibling.style.display="block"}}})}window.addSearchOptions=function(crates){var elem=document.getElementById("crate-search");if(!elem){return}var crates_text=[];if(Object.keys(crates).length>1){for(var crate in crates){if(hasOwnProperty(crates,crate)){crates_text.push(crate)}}}crates_text.sort(function(a,b){var lower_a=a.toLowerCase();var lower_b=b.toLowerCase();if(lower_alower_b){return 1}return 0});var savedCrate=getCurrentValue("rustdoc-saved-filter-crate");for(var i=0;i"
"+x[0]+"
"+x[1]+"
").join("");var div_shortcuts=document.createElement("div");addClass(div_shortcuts,"shortcuts");div_shortcuts.innerHTML="

Keyboard Shortcuts

"+shortcuts+"
";var infos=["Prefix searches with a type followed by a colon (e.g., fn:) to \ + restrict the search to a given type.","Accepted types are: fn, mod, struct, \ + enum, trait, type, macro, \ + and const.","Search functions by type signature (e.g., vec -> usize or \ + * -> vec)","Search multiple things at once by splitting your query with comma (e.g., \ + str,u8 or String,struct:Vec,test)","You can look for items with an exact name by putting double quotes around \ + your request: \"string\"","Look for items inside another one by searching for a path: vec::Vec",].map(x=>"

"+x+"

").join("");var div_infos=document.createElement("div");addClass(div_infos,"infos");div_infos.innerHTML="

Search Tricks

"+infos;container.appendChild(div_shortcuts);container.appendChild(div_infos);popup.appendChild(container);insertAfter(popup,getSearchElement())}onHashChange(null);window.onhashchange=onHashChange;buildHelperPopup()}());window.onunload=function(){} \ No newline at end of file diff --git a/normalize.css b/normalize.css new file mode 100644 index 000000000..45b6cb263 --- /dev/null +++ b/normalize.css @@ -0,0 +1,2 @@ +/*! normalize.css v3.0.0 | MIT License | git.io/normalize */ +html{font-family:sans-serif;-ms-text-size-adjust:100%;-webkit-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background:transparent}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:bold}dfn{font-style:italic}h1{font-size:2em;margin:.67em 0}mark{background:#ff0;color:#000}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{-moz-box-sizing:content-box;box-sizing:content-box;height:0}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{color:inherit;font:inherit;margin:0}button{overflow:visible}button,select{text-transform:none}button,html input[type="button"],input[type="reset"],input[type="submit"]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}input{line-height:normal}input[type="checkbox"],input[type="radio"]{box-sizing:border-box;padding:0}input[type="number"]::-webkit-inner-spin-button,input[type="number"]::-webkit-outer-spin-button{height:auto}input[type="search"]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}input[type="search"]::-webkit-search-cancel-button,input[type="search"]::-webkit-search-decoration{-webkit-appearance:none}fieldset{border:1px solid silver;margin:0 2px;padding:.35em .625em .75em}legend{border:0;padding:0}textarea{overflow:auto}optgroup{font-weight:bold}table{border-collapse:collapse;border-spacing:0}td,th{padding:0} \ No newline at end of file diff --git a/noscript.css b/noscript.css new file mode 100644 index 000000000..351b41cf8 --- /dev/null +++ b/noscript.css @@ -0,0 +1 @@ +#main>h2+div,#main>h2+h3,#main>h3+div{display:block;}.loading-content{display:none;}#main>h2+div,#main>h3+div{display:block;}#main>h2+h3{display:flex;} \ No newline at end of file diff --git a/packed_simd/all.html b/packed_simd/all.html new file mode 100644 index 000000000..f82e72381 --- /dev/null +++ b/packed_simd/all.html @@ -0,0 +1,3 @@ +List of all items in this crate

List of all items

Structs

Traits

Macros

Typedefs

\ No newline at end of file diff --git a/packed_simd/api/cast/trait.Cast.html b/packed_simd/api/cast/trait.Cast.html new file mode 100644 index 000000000..c9d8178d1 --- /dev/null +++ b/packed_simd/api/cast/trait.Cast.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../packed_simd/trait.Cast.html...

+ + + \ No newline at end of file diff --git a/packed_simd/api/cast/trait.FromCast.html b/packed_simd/api/cast/trait.FromCast.html new file mode 100644 index 000000000..c05d12a8f --- /dev/null +++ b/packed_simd/api/cast/trait.FromCast.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../packed_simd/trait.FromCast.html...

+ + + \ No newline at end of file diff --git a/packed_simd/api/into_bits/trait.FromBits.html b/packed_simd/api/into_bits/trait.FromBits.html new file mode 100644 index 000000000..2c23e00fe --- /dev/null +++ b/packed_simd/api/into_bits/trait.FromBits.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../packed_simd/trait.FromBits.html...

+ + + \ No newline at end of file diff --git a/packed_simd/api/into_bits/trait.IntoBits.html b/packed_simd/api/into_bits/trait.IntoBits.html new file mode 100644 index 000000000..28f067d7b --- /dev/null +++ b/packed_simd/api/into_bits/trait.IntoBits.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../../packed_simd/trait.IntoBits.html...

+ + + \ No newline at end of file diff --git a/packed_simd/index.html b/packed_simd/index.html new file mode 100644 index 000000000..1e310e418 --- /dev/null +++ b/packed_simd/index.html @@ -0,0 +1,290 @@ +packed_simd - Rust

Crate packed_simd

Portable packed SIMD vectors

+

This crate is proposed for stabilization as std::packed_simd in RFC2366: +std::simd.

+

The examples available in the +examples/ +sub-directory of the crate showcase how to use the library in practice.

+

Table of contents

+ +

Introduction

+

This crate exports Simd<[T; N]>: a packed vector of N +elements of type T, as well as many type aliases for this type: for +example, f32x4, which is just an alias for Simd<[f32; 4]>.

+

The operations on packed vectors are, by default, "vertical", that is, they +are applied to each vector lane in isolation from the others:

+ +
+let a = i32x4::new(1, 2, 3, 4);
+let b = i32x4::new(5, 6, 7, 8);
+assert_eq!(a + b, i32x4::new(6, 8, 10, 12));
+

Many "horizontal" operations are also provided:

+ +
+assert_eq!(a.wrapping_sum(), 10);
+

On virtually all architectures vertical operations are fast, while +horizontal operations are, by comparison, much slower. Thus, the +most portably-efficient way of performing a reduction over a slice +is to accumulate the results into a vector using vertical operations, +and then perform a single horizontal operation at the end:

+ +
+fn reduce(x: &[i32]) -> i32 {
+    assert!(x.len() % 4 == 0);
+    let mut sum = i32x4::splat(0); // [0, 0, 0, 0]
+    for i in (0..x.len()).step_by(4) {
+        sum += i32x4::from_slice_unaligned(&x[i..]);
+    }
+    sum.wrapping_sum()
+}
+
+let x = [0, 1, 2, 3, 4, 5, 6, 7];
+assert_eq!(reduce(&x), 28);
+

Vector types

+

The vector type aliases are named according to the following scheme:

+
+

{element_type}x{number_of_lanes} == Simd<[element_type; number_of_lanes]>

+
+

where the following element types are supported (a short sketch follows the list):

+
    +
  • i{element_width}: signed integer
  • +
  • u{element_width}: unsigned integer
  • +
  • f{element_width}: float
  • +
  • m{element_width}: mask (see below)
  • +
  • *{const,mut} T: const and mut pointers
  • +
+
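For instance, a small sketch (using only constructs shown elsewhere on this page, and omitting imports like the page's other examples) showing that the alias and the full generic spelling name exactly the same type:

+
+let x: Simd<[i32; 4]> = i32x4::new(1, 2, 3, 4);
+// No conversion is needed: both spellings name one and the same type.
+let y: i32x4 = x;
+assert_eq!(y.extract(0), 1);
+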

Basic operations

+
+// Sets all elements to `0`:
+let a = i32x4::splat(0);
+
+// Reads a vector from a slice:
+let mut arr = [0, 0, 0, 1, 2, 3, 4, 5];
+let b = i32x4::from_slice_unaligned(&arr);
+
+// Reads the 4-th element of a vector:
+assert_eq!(b.extract(3), 1);
+
+// Returns a new vector where the 4-th element is replaced with `1`:
+let a = a.replace(3, 1);
+assert_eq!(a, b);
+
+// Writes a vector to a slice:
+let a = a.replace(2, 1);
+a.write_to_slice_unaligned(&mut arr[4..]);
+assert_eq!(arr, [0, 0, 0, 1, 0, 0, 1, 1]);
+

Conditional operations

+

One often needs to perform an operation on some lanes of the vector. Vector +masks, like m32x4, allow selecting on which vector lanes an operation is +to be performed:

+ +
+let a = i32x4::new(1, 1, 2, 2);
+
+// Add `1` to the first two lanes of the vector.
+let m = m16x4::new(true, true, false, false);
+let a = m.select(a + 1, a);
+assert_eq!(a, i32x4::splat(2));
+

The elements of a vector mask are either true or false. Here true +means that a lane is "selected", while false means that a lane is not +selected.

+

All vector masks implement a mask.select(a: T, b: T) -> T method that +works on all vectors that have the same number of lanes as the mask. The +resulting vector contains the elements of a for those lanes for which the +mask is true, and the elements of b otherwise.

+

The example constructs a mask with the first two lanes set to true and +the last two lanes set to false. This selects the first two lanes of a + 1 and the last two lanes of a, producing a vector where the first two +lanes have been incremented by 1.

+
+

note: mask select can be used on vector types that have the same number +of lanes as the mask. The example shows this by using m16x4 instead +of m32x4. It is typically more performant to use a mask element +width equal to the element width of the vectors being operated upon. +This is, however, not true for 512-bit wide vectors when targeting +AVX-512, where the most efficient masks use only 1 bit per element.

+
+

All vertical comparison operations return masks:

+ +
+let a = i32x4::new(1, 1, 3, 3);
+let b = i32x4::new(2, 2, 0, 0);
+
+// ge: >= (Greater or Equal; see also lt, le, gt, eq, ne).
+let m = a.ge(i32x4::splat(2));
+
+if m.any() {
+    // all / any / none allow coherent control flow
+    let d = m.select(a, b);
+    assert_eq!(d, i32x4::new(2, 2, 3, 3));
+}
+

Conversions

+
    +
  • +

    lossless widening conversions: From/Into are implemented for +vectors with the same number of lanes when the conversion is value +preserving (same as in std).

    +
  • +
  • +

safe bitwise conversions: The cargo feature into_bits provides the +IntoBits/FromBits traits (x.into_bits()). These perform safe bitwise +transmutes when all bit patterns of the source type are valid bit +patterns of the target type, and are also implemented for the +architecture-specific vector types of std::arch. For example, let x: u8x8 = m8x8::splat(true).into_bits(); is provided because all m8x8 bit +patterns are valid u8x8 bit patterns. However, the opposite is not +true: not all u8x8 bit patterns are valid m8x8 bit patterns, so this +operation cannot be performed safely using x.into_bits(); one needs to +use unsafe { crate::mem::transmute(x) } for that, making sure that the +value in the u8x8 is a valid bit pattern of m8x8.

    +
  • +
  • +

numeric casts (as): are performed using FromCast/Cast +(x.cast()), just like as:

    +
      +
    • +

      casting integer vectors whose lane types have the same size (e.g. +i32xN -> u32xN) is a no-op,

      +
    • +
    • +

      casting from a larger integer to a smaller integer (e.g. u32xN -> +u8xN) will truncate,

      +
    • +
    • +

      casting from a smaller integer to a larger integer (e.g. u8xN -> +u32xN) will:

      +
        +
      • zero-extend if the source is unsigned, or
      • +
      • sign-extend if the source is signed,
      • +
      +
    • +
    • +

      casting from a float to an integer will round the float towards +zero,

      +
    • +
    • +

      casting from an integer to float will produce the floating point +representation of the integer, rounding to nearest, ties to even,

      +
    • +
    • +

      casting from an f32 to an f64 is perfect and lossless,

      +
    • +
    • +

      casting from an f64 to an f32 rounds to nearest, ties to even.

      +
    • +
    +

Numeric casts are not very "precise": they are sometimes lossy and sometimes value +preserving, depending on the types involved (a short sketch of all three +conversion kinds follows this list).

    +
  • +
+
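A short sketch tying the three conversion kinds together. Hedged: it assumes the widening From impl exists for this particular lane count, and that the into_bits cargo feature is enabled for the bitwise part; imports are omitted, matching the page's other examples:

+
+// Lossless widening via From/Into (value preserving, same lane count):
+let small = i8x4::new(1, 2, 3, 4);
+let wide: i16x4 = small.into();
+assert_eq!(wide, i16x4::new(1, 2, 3, 4));
+
+// Safe bitwise conversion via IntoBits (the example from above):
+let x: u8x8 = m8x8::splat(true).into_bits();
+
+// Numeric cast via Cast, behaving like `as` (truncating here):
+let narrow: u8x4 = u32x4::new(256, 511, 1, 2).cast();
+assert_eq!(narrow, u8x4::new(0, 255, 1, 2));
+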

Macros

+
shuffle

Shuffles vector elements.

+

Structs

+
LexicographicallyOrdered

Wrapper over T implementing a lexicographical order via the PartialOrd +and/or Ord traits.

+
Simd

Packed SIMD vector type.

+
m8

8-bit wide mask.

+
m16

16-bit wide mask.

+
m32

32-bit wide mask.

+
m64

64-bit wide mask.

+
m128

128-bit wide mask.

+
msize

isize-wide mask.

+

Traits

+
Cast

Numeric cast from Self to T.

+
FromBits

Safe lossless bitwise conversion from T to Self.

+
FromCast

Numeric cast from T to Self.

+
IntoBits

Safe lossless bitwise conversion from Self to T.

+
Mask

This trait is implemented by all mask types.

+
SimdArray

Trait implemented by arrays that can be SIMD types.

+
SimdVector

This trait is implemented by all SIMD vector types.

+

Type Definitions

+
cptrx2

A vector with 2 *const T lanes

+
cptrx4

A vector with 4 *const T lanes

+
cptrx8

A vector with 8 *const T lanes

+
f32x2

A 64-bit vector with 2 f32 lanes.

+
f32x4

A 128-bit vector with 4 f32 lanes.

+
f32x8

A 256-bit vector with 8 f32 lanes.

+
f32x16

A 512-bit vector with 16 f32 lanes.

+
f64x2

A 128-bit vector with 2 f64 lanes.

+
f64x4

A 256-bit vector with 4 f64 lanes.

+
f64x8

A 512-bit vector with 8 f64 lanes.

+
i128x1

A 128-bit vector with 1 i128 lane.

+
i128x2

A 256-bit vector with 2 i128 lanes.

+
i128x4

A 512-bit vector with 4 i128 lanes.

+
i16x2

A 32-bit vector with 2 i16 lanes.

+
i16x4

A 64-bit vector with 4 i16 lanes.

+
i16x8

A 128-bit vector with 8 i16 lanes.

+
i16x16

A 256-bit vector with 16 i16 lanes.

+
i16x32

A 512-bit vector with 32 i16 lanes.

+
i32x2

A 64-bit vector with 2 i32 lanes.

+
i32x4

A 128-bit vector with 4 i32 lanes.

+
i32x8

A 256-bit vector with 8 i32 lanes.

+
i32x16

A 512-bit vector with 16 i32 lanes.

+
i64x2

A 128-bit vector with 2 i64 lanes.

+
i64x4

A 256-bit vector with 4 i64 lanes.

+
i64x8

A 512-bit vector with 8 i64 lanes.

+
i8x2

A 16-bit vector with 2 i8 lanes.

+
i8x4

A 32-bit vector with 4 i8 lanes.

+
i8x8

A 64-bit vector with 8 i8 lanes.

+
i8x16

A 128-bit vector with 16 i8 lanes.

+
i8x32

A 256-bit vector with 32 i8 lanes.

+
i8x64

A 512-bit vector with 64 i8 lanes.

+
isizex2

A vector with 2 isize lanes.

+
isizex4

A vector with 4 isize lanes.

+
isizex8

A vector with 8 isize lanes.

+
m128x1

A 128-bit vector mask with 1 m128 lane.

+
m128x2

A 256-bit vector mask with 2 m128 lanes.

+
m128x4

A 512-bit vector mask with 4 m128 lanes.

+
m16x2

A 32-bit vector mask with 2 m16 lanes.

+
m16x4

A 64-bit vector mask with 4 m16 lanes.

+
m16x8

A 128-bit vector mask with 8 m16 lanes.

+
m16x16

A 256-bit vector mask with 16 m16 lanes.

+
m16x32

A 512-bit vector mask with 32 m16 lanes.

+
m32x2

A 64-bit vector mask with 2 m32 lanes.

+
m32x4

A 128-bit vector mask with 4 m32 lanes.

+
m32x8

A 256-bit vector mask with 8 m32 lanes.

+
m32x16

A 512-bit vector mask with 16 m32 lanes.

+
m64x2

A 128-bit vector mask with 2 m64 lanes.

+
m64x4

A 256-bit vector mask with 4 m64 lanes.

+
m64x8

A 512-bit vector mask with 8 m64 lanes.

+
m8x2

A 16-bit vector mask with 2 m8 lanes.

+
m8x4

A 32-bit vector mask with 4 m8 lanes.

+
m8x8

A 64-bit vector mask with 8 m8 lanes.

+
m8x16

A 128-bit vector mask with 16 m8 lanes.

+
m8x32

A 256-bit vector mask with 32 m8 lanes.

+
m8x64

A 512-bit vector mask with 64 m8 lanes.

+
mptrx2

A vector with 2 *mut T lanes

+
mptrx4

A vector with 4 *mut T lanes

+
mptrx8

A vector with 8 *mut T lanes

+
msizex2

A vector mask with 2 msize lanes.

+
msizex4

A vector mask with 4 msize lanes.

+
msizex8

A vector mask with 8 msize lanes.

+
u128x1

A 128-bit vector with 1 u128 lane.

+
u128x2

A 256-bit vector with 2 u128 lanes.

+
u128x4

A 512-bit vector with 4 u128 lanes.

+
u16x2

A 32-bit vector with 2 u16 lanes.

+
u16x4

A 64-bit vector with 4 u16 lanes.

+
u16x8

A 128-bit vector with 8 u16 lanes.

+
u16x16

A 256-bit vector with 16 u16 lanes.

+
u16x32

A 512-bit vector with 32 u16 lanes.

+
u32x2

A 64-bit vector with 2 u32 lanes.

+
u32x4

A 128-bit vector with 4 u32 lanes.

+
u32x8

A 256-bit vector with 8 u32 lanes.

+
u32x16

A 512-bit vector with 16 u32 lanes.

+
u64x2

A 128-bit vector with 2 u64 lanes.

+
u64x4

A 256-bit vector with 4 u64 lanes.

+
u64x8

A 512-bit vector with 8 u64 lanes.

+
u8x2

A 16-bit vector with 2 u8 lanes.

+
u8x4

A 32-bit vector with 4 u8 lanes.

+
u8x8

A 64-bit vector with 8 u8 lanes.

+
u8x16

A 128-bit vector with 16 u8 lanes.

+
u8x32

A 256-bit vector with 32 u8 lanes.

+
u8x64

A 512-bit vector with 64 u8 lanes.

+
usizex2

A vector with 2 usize lanes.

+
usizex4

A vector with 4 usize lanes.

+
usizex8

A vector with 8 usize lanes.

+
\ No newline at end of file diff --git a/packed_simd/macro.shuffle!.html b/packed_simd/macro.shuffle!.html new file mode 100644 index 000000000..8ba23da2e --- /dev/null +++ b/packed_simd/macro.shuffle!.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to macro.shuffle.html...

+ + + \ No newline at end of file diff --git a/packed_simd/macro.shuffle.html b/packed_simd/macro.shuffle.html new file mode 100644 index 000000000..d3a817c09 --- /dev/null +++ b/packed_simd/macro.shuffle.html @@ -0,0 +1,92 @@ +packed_simd::shuffle - Rust

Macro packed_simd::shuffle

+macro_rules! shuffle {
+    ($vec0:expr, $vec1:expr, [$l0:expr, $l1:expr]) => { ... };
+    ($vec0:expr, $vec1:expr, [$l0:expr, $l1:expr, $l2:expr, $l3:expr]) => { ... };
+    ($vec0:expr, $vec1:expr,
+     [$l0:expr, $l1:expr, $l2:expr, $l3:expr,
+      $l4:expr, $l5:expr, $l6:expr, $l7:expr]) => { ... };
+    ($vec0:expr, $vec1:expr,
+     [$l0:expr, $l1:expr, $l2:expr, $l3:expr,
+      $l4:expr, $l5:expr, $l6:expr, $l7:expr,
+      $l8:expr, $l9:expr, $l10:expr, $l11:expr,
+      $l12:expr, $l13:expr, $l14:expr, $l15:expr]) => { ... };
+    ($vec0:expr, $vec1:expr,
+     [$l0:expr, $l1:expr, $l2:expr, $l3:expr,
+      $l4:expr, $l5:expr, $l6:expr, $l7:expr,
+      $l8:expr, $l9:expr, $l10:expr, $l11:expr,
+      $l12:expr, $l13:expr, $l14:expr, $l15:expr,
+      $l16:expr, $l17:expr, $l18:expr, $l19:expr,
+      $l20:expr, $l21:expr, $l22:expr, $l23:expr,
+      $l24:expr, $l25:expr, $l26:expr, $l27:expr,
+      $l28:expr, $l29:expr, $l30:expr, $l31:expr]) => { ... };
+    ($vec0:expr, $vec1:expr,
+     [$l0:expr, $l1:expr, $l2:expr, $l3:expr,
+      $l4:expr, $l5:expr, $l6:expr, $l7:expr,
+      $l8:expr, $l9:expr, $l10:expr, $l11:expr,
+      $l12:expr, $l13:expr, $l14:expr, $l15:expr,
+      $l16:expr, $l17:expr, $l18:expr, $l19:expr,
+      $l20:expr, $l21:expr, $l22:expr, $l23:expr,
+      $l24:expr, $l25:expr, $l26:expr, $l27:expr,
+      $l28:expr, $l29:expr, $l30:expr, $l31:expr,
+      $l32:expr, $l33:expr, $l34:expr, $l35:expr,
+      $l36:expr, $l37:expr, $l38:expr, $l39:expr,
+      $l40:expr, $l41:expr, $l42:expr, $l43:expr,
+      $l44:expr, $l45:expr, $l46:expr, $l47:expr,
+      $l48:expr, $l49:expr, $l50:expr, $l51:expr,
+      $l52:expr, $l53:expr, $l54:expr, $l55:expr,
+      $l56:expr, $l57:expr, $l58:expr, $l59:expr,
+      $l60:expr, $l61:expr, $l62:expr, $l63:expr]) => { ... };
+    ($vec:expr, [$($l:expr),*]) => { ... };
+}
+

Shuffles vector elements.

+

This macro returns a new vector that contains a shuffle of the elements in +one (shuffle!(vec, [indices...])) or two (shuffle!(vec0, vec1, [indices...])) input vectors.

+

The type of vec0 and vec1 must be equal, and the element type of the +resulting vector is the element type of the input vector.

+

The number of indices must be a power of two in the range [2, 64], since +currently the largest vector supported by the library has 64 lanes. The +length of the resulting vector equals the number of indices provided.

+

The indices must be in range [0, M * N) where M is the number of input +vectors (1 or 2) and N is the number of lanes of the input vectors. +An index i in range [0, N) refers to the i-th element of vec0, +while an index in range [N, 2*N) refers to the (i - N)-th element of +vec1.

+

Examples

+

Shuffling elements of two vectors:

+ +
+// Shuffle allows reordering the elements:
+let x = i32x4::new(1, 2, 3, 4);
+let y = i32x4::new(5, 6, 7, 8);
+let r = shuffle!(x, y, [4, 0, 5, 1]);
+assert_eq!(r, i32x4::new(5, 1, 6, 2));
+
+// The resulting vector can also be smaller than the input:
+let r = shuffle!(x, y, [1, 6]);
+assert_eq!(r, i32x2::new(2, 7));
+
+// Or larger:
+let r = shuffle!(x, y, [1, 3, 4, 2, 1, 7, 2, 2]);
+assert_eq!(r, i32x8::new(2, 4, 5, 3, 2, 8, 3, 3));
+// At most 2 * the number of lanes in the input vector.
+

Shuffling elements of one vector:

+ +
+// Shuffle allows reordering the elements of a vector:
+let x = i32x4::new(1, 2, 3, 4);
+let r = shuffle!(x, [2, 1, 3, 0]);
+assert_eq!(r, i32x4::new(3, 2, 4, 1));
+
+// The resulting vector can be smaller than the input:
+let r = shuffle!(x, [1, 3]);
+assert_eq!(r, i32x2::new(2, 4));
+
+// Equal:
+let r = shuffle!(x, [1, 3, 2, 0]);
+assert_eq!(r, i32x4::new(2, 4, 3, 1));
+
+// Or larger:
+let r = shuffle!(x, [1, 3, 2, 2, 1, 3, 2, 2]);
+assert_eq!(r, i32x8::new(2, 4, 3, 3, 2, 4, 3, 3));
+// At most 2 * the number of lanes in the input vector.
+
\ No newline at end of file diff --git a/packed_simd/masks/struct.m128.html b/packed_simd/masks/struct.m128.html new file mode 100644 index 000000000..16f94f2b2 --- /dev/null +++ b/packed_simd/masks/struct.m128.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/struct.m128.html...

+ + + \ No newline at end of file diff --git a/packed_simd/masks/struct.m16.html b/packed_simd/masks/struct.m16.html new file mode 100644 index 000000000..b325bded2 --- /dev/null +++ b/packed_simd/masks/struct.m16.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/struct.m16.html...

+ + + \ No newline at end of file diff --git a/packed_simd/masks/struct.m32.html b/packed_simd/masks/struct.m32.html new file mode 100644 index 000000000..1227cfb92 --- /dev/null +++ b/packed_simd/masks/struct.m32.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/struct.m32.html...

+ + + \ No newline at end of file diff --git a/packed_simd/masks/struct.m64.html b/packed_simd/masks/struct.m64.html new file mode 100644 index 000000000..6df1ad61d --- /dev/null +++ b/packed_simd/masks/struct.m64.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/struct.m64.html...

+ + + \ No newline at end of file diff --git a/packed_simd/masks/struct.m8.html b/packed_simd/masks/struct.m8.html new file mode 100644 index 000000000..a05f8ec22 --- /dev/null +++ b/packed_simd/masks/struct.m8.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/struct.m8.html...

+ + + \ No newline at end of file diff --git a/packed_simd/masks/struct.msize.html b/packed_simd/masks/struct.msize.html new file mode 100644 index 000000000..57f8b1276 --- /dev/null +++ b/packed_simd/masks/struct.msize.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/struct.msize.html...

+ + + \ No newline at end of file diff --git a/packed_simd/sealed/trait.Mask.html b/packed_simd/sealed/trait.Mask.html new file mode 100644 index 000000000..71611d4ee --- /dev/null +++ b/packed_simd/sealed/trait.Mask.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/trait.Mask.html...

+ + + \ No newline at end of file diff --git a/packed_simd/sealed/trait.Simd.html b/packed_simd/sealed/trait.Simd.html new file mode 100644 index 000000000..d78b1b9c3 --- /dev/null +++ b/packed_simd/sealed/trait.Simd.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/trait.SimdVector.html...

+ + + \ No newline at end of file diff --git a/packed_simd/sealed/trait.SimdArray.html b/packed_simd/sealed/trait.SimdArray.html new file mode 100644 index 000000000..374fbb0e4 --- /dev/null +++ b/packed_simd/sealed/trait.SimdArray.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/trait.SimdArray.html...

+ + + \ No newline at end of file diff --git a/packed_simd/sidebar-items.js b/packed_simd/sidebar-items.js new file mode 100644 index 000000000..7f50794bc --- /dev/null +++ b/packed_simd/sidebar-items.js @@ -0,0 +1 @@ +initSidebarItems({"macro":[["shuffle","Shuffles vector elements."]],"struct":[["LexicographicallyOrdered","Wrapper over `T` implementing a lexicoraphical order via the `PartialOrd` and/or `Ord` traits."],["Simd","Packed SIMD vector type."],["m128","128-bit wide mask."],["m16","16-bit wide mask."],["m32","32-bit wide mask."],["m64","64-bit wide mask."],["m8","8-bit wide mask."],["msize","isize-wide mask."]],"trait":[["Cast","Numeric cast from `Self` to `T`."],["FromBits","Safe lossless bitwise conversion from `T` to `Self`."],["FromCast","Numeric cast from `T` to `Self`."],["IntoBits","Safe lossless bitwise conversion from `Self` to `T`."],["Mask","This trait is implemented by all mask types"],["SimdArray","Trait implemented by arrays that can be SIMD types."],["SimdVector","This trait is implemented by all SIMD vector types."]],"type":[["cptrx2","A vector with 2 `*const T` lanes"],["cptrx4","A vector with 4 `*const T` lanes"],["cptrx8","A vector with 8 `*const T` lanes"],["f32x16","A 512-bit vector with 16 `f32` lanes."],["f32x2","A 64-bit vector with 2 `f32` lanes."],["f32x4","A 128-bit vector with 4 `f32` lanes."],["f32x8","A 256-bit vector with 8 `f32` lanes."],["f64x2","A 128-bit vector with 2 `f64` lanes."],["f64x4","A 256-bit vector with 4 `f64` lanes."],["f64x8","A 512-bit vector with 8 `f64` lanes."],["i128x1","A 128-bit vector with 1 `i128` lane."],["i128x2","A 256-bit vector with 2 `i128` lanes."],["i128x4","A 512-bit vector with 4 `i128` lanes."],["i16x16","A 256-bit vector with 16 `i16` lanes."],["i16x2","A 32-bit vector with 2 `i16` lanes."],["i16x32","A 512-bit vector with 32 `i16` lanes."],["i16x4","A 64-bit vector with 4 `i16` lanes."],["i16x8","A 128-bit vector with 8 `i16` lanes."],["i32x16","A 512-bit vector with 16 `i32` lanes."],["i32x2","A 64-bit vector with 2 `i32` lanes."],["i32x4","A 128-bit vector with 4 `i32` lanes."],["i32x8","A 256-bit vector with 8 `i32` lanes."],["i64x2","A 128-bit vector with 2 `i64` lanes."],["i64x4","A 256-bit vector with 4 `i64` lanes."],["i64x8","A 512-bit vector with 8 `i64` lanes."],["i8x16","A 128-bit vector with 16 `i8` lanes."],["i8x2","A 16-bit vector with 2 `i8` lanes."],["i8x32","A 256-bit vector with 32 `i8` lanes."],["i8x4","A 32-bit vector with 4 `i8` lanes."],["i8x64","A 512-bit vector with 64 `i8` lanes."],["i8x8","A 64-bit vector with 8 `i8` lanes."],["isizex2","A vector with 2 `isize` lanes."],["isizex4","A vector with 4 `isize` lanes."],["isizex8","A vector with 4 `isize` lanes."],["m128x1","A 128-bit vector mask with 1 `m128` lane."],["m128x2","A 256-bit vector mask with 2 `m128` lanes."],["m128x4","A 512-bit vector mask with 4 `m128` lanes."],["m16x16","A 256-bit vector mask with 16 `m16` lanes."],["m16x2","A 32-bit vector mask with 2 `m16` lanes."],["m16x32","A 512-bit vector mask with 32 `m16` lanes."],["m16x4","A 64-bit vector mask with 4 `m16` lanes."],["m16x8","A 128-bit vector mask with 8 `m16` lanes."],["m32x16","A 512-bit vector mask with 16 `m32` lanes."],["m32x2","A 64-bit vector mask with 2 `m32` lanes."],["m32x4","A 128-bit vector mask with 4 `m32` lanes."],["m32x8","A 256-bit vector mask with 8 `m32` lanes."],["m64x2","A 128-bit vector mask with 2 `m64` lanes."],["m64x4","A 256-bit vector mask with 4 `m64` lanes."],["m64x8","A 512-bit vector mask with 8 `m64` lanes."],["m8x16","A 128-bit 
vector mask with 16 `m8` lanes."],["m8x2","A 16-bit vector mask with 2 `m8` lanes."],["m8x32","A 256-bit vector mask with 32 `m8` lanes."],["m8x4","A 32-bit vector mask with 4 `m8` lanes."],["m8x64","A 512-bit vector mask with 64 `m8` lanes."],["m8x8","A 64-bit vector mask with 8 `m8` lanes."],["mptrx2","A vector with 2 `*mut T` lanes"],["mptrx4","A vector with 4 `*mut T` lanes"],["mptrx8","A vector with 8 `*mut T` lanes"],["msizex2","A vector mask with 2 `msize` lanes."],["msizex4","A vector mask with 4 `msize` lanes."],["msizex8","A vector mask with 8 `msize` lanes."],["u128x1","A 128-bit vector with 1 `u128` lane."],["u128x2","A 256-bit vector with 2 `u128` lanes."],["u128x4","A 512-bit vector with 4 `u128` lanes."],["u16x16","A 256-bit vector with 16 `u16` lanes."],["u16x2","A 32-bit vector with 2 `u16` lanes."],["u16x32","A 512-bit vector with 32 `u16` lanes."],["u16x4","A 64-bit vector with 4 `u16` lanes."],["u16x8","A 128-bit vector with 8 `u16` lanes."],["u32x16","A 512-bit vector with 16 `u32` lanes."],["u32x2","A 64-bit vector with 2 `u32` lanes."],["u32x4","A 128-bit vector with 4 `u32` lanes."],["u32x8","A 256-bit vector with 8 `u32` lanes."],["u64x2","A 128-bit vector with 2 `u64` lanes."],["u64x4","A 256-bit vector with 4 `u64` lanes."],["u64x8","A 512-bit vector with 8 `u64` lanes."],["u8x16","A 128-bit vector with 16 `u8` lanes."],["u8x2","A 16-bit vector with 2 `u8` lanes."],["u8x32","A 256-bit vector with 32 `u8` lanes."],["u8x4","A 32-bit vector with 4 `u8` lanes."],["u8x64","A 512-bit vector with 64 `u8` lanes."],["u8x8","A 64-bit vector with 8 `u8` lanes."],["usizex2","A vector with 2 `usize` lanes."],["usizex4","A vector with 4 `usize` lanes."],["usizex8","A vector with 8 `usize` lanes."]]}); \ No newline at end of file diff --git a/packed_simd/struct.LexicographicallyOrdered.html b/packed_simd/struct.LexicographicallyOrdered.html new file mode 100644 index 000000000..abea6e9e9 --- /dev/null +++ b/packed_simd/struct.LexicographicallyOrdered.html @@ -0,0 +1,833 @@ +packed_simd::LexicographicallyOrdered - Rust

Struct packed_simd::LexicographicallyOrdered[src]

#[repr(transparent)]pub struct LexicographicallyOrdered<T>(_);

Wrapper over T implementing a lexicographical order via the PartialOrd and/or Ord traits.

+

Trait Implementations

impl<T: Clone> Clone for LexicographicallyOrdered<T>[src]

impl<T: Copy> Copy for LexicographicallyOrdered<T>[src]

impl<T: Debug> Debug for LexicographicallyOrdered<T>[src]

impl Eq for LexicographicallyOrdered<i8x2>[src]

impl Eq for LexicographicallyOrdered<u8x2>[src]

impl Eq for LexicographicallyOrdered<u8x8>[src]

impl Eq for LexicographicallyOrdered<m8x8>[src]

impl Eq for LexicographicallyOrdered<i16x4>[src]

impl Eq for LexicographicallyOrdered<u16x4>[src]

impl Eq for LexicographicallyOrdered<m16x4>[src]

impl Eq for LexicographicallyOrdered<i32x2>[src]

impl Eq for LexicographicallyOrdered<u32x2>[src]

impl Eq for LexicographicallyOrdered<m32x2>[src]

impl Eq for LexicographicallyOrdered<i8x16>[src]

impl Eq for LexicographicallyOrdered<u8x16>[src]

impl Eq for LexicographicallyOrdered<m8x2>[src]

impl Eq for LexicographicallyOrdered<m8x16>[src]

impl Eq for LexicographicallyOrdered<i16x8>[src]

impl Eq for LexicographicallyOrdered<u16x8>[src]

impl Eq for LexicographicallyOrdered<m16x8>[src]

impl Eq for LexicographicallyOrdered<i32x4>[src]

impl Eq for LexicographicallyOrdered<u32x4>[src]

impl Eq for LexicographicallyOrdered<m32x4>[src]

impl Eq for LexicographicallyOrdered<i64x2>[src]

impl Eq for LexicographicallyOrdered<u64x2>[src]

impl Eq for LexicographicallyOrdered<m64x2>[src]

impl Eq for LexicographicallyOrdered<i8x4>[src]

impl Eq for LexicographicallyOrdered<i128x1>[src]

impl Eq for LexicographicallyOrdered<u128x1>[src]

impl Eq for LexicographicallyOrdered<m128x1>[src]

impl Eq for LexicographicallyOrdered<i8x32>[src]

impl Eq for LexicographicallyOrdered<u8x32>[src]

impl Eq for LexicographicallyOrdered<m8x32>[src]

impl Eq for LexicographicallyOrdered<i16x16>[src]

impl Eq for LexicographicallyOrdered<u16x16>[src]

impl Eq for LexicographicallyOrdered<m16x16>[src]

impl Eq for LexicographicallyOrdered<i32x8>[src]

impl Eq for LexicographicallyOrdered<u8x4>[src]

impl Eq for LexicographicallyOrdered<u32x8>[src]

impl Eq for LexicographicallyOrdered<m32x8>[src]

impl Eq for LexicographicallyOrdered<i64x4>[src]

impl Eq for LexicographicallyOrdered<u64x4>[src]

impl Eq for LexicographicallyOrdered<m64x4>[src]

impl Eq for LexicographicallyOrdered<i128x2>[src]

impl Eq for LexicographicallyOrdered<u128x2>[src]

impl Eq for LexicographicallyOrdered<m128x2>[src]

impl Eq for LexicographicallyOrdered<i8x64>[src]

impl Eq for LexicographicallyOrdered<u8x64>[src]

impl Eq for LexicographicallyOrdered<m8x4>[src]

impl Eq for LexicographicallyOrdered<m8x64>[src]

impl Eq for LexicographicallyOrdered<i16x32>[src]

impl Eq for LexicographicallyOrdered<u16x32>[src]

impl Eq for LexicographicallyOrdered<m16x32>[src]

impl Eq for LexicographicallyOrdered<i32x16>[src]

impl Eq for LexicographicallyOrdered<u32x16>[src]

impl Eq for LexicographicallyOrdered<m32x16>[src]

impl Eq for LexicographicallyOrdered<i64x8>[src]

impl Eq for LexicographicallyOrdered<u64x8>[src]

impl Eq for LexicographicallyOrdered<m64x8>[src]

impl Eq for LexicographicallyOrdered<i16x2>[src]

impl Eq for LexicographicallyOrdered<i128x4>[src]

impl Eq for LexicographicallyOrdered<u128x4>[src]

impl Eq for LexicographicallyOrdered<m128x4>[src]

impl Eq for LexicographicallyOrdered<isizex2>[src]

impl Eq for LexicographicallyOrdered<usizex2>[src]

impl Eq for LexicographicallyOrdered<msizex2>[src]

impl Eq for LexicographicallyOrdered<isizex4>[src]

impl Eq for LexicographicallyOrdered<usizex4>[src]

impl Eq for LexicographicallyOrdered<msizex4>[src]

impl Eq for LexicographicallyOrdered<isizex8>[src]

impl Eq for LexicographicallyOrdered<u16x2>[src]

impl Eq for LexicographicallyOrdered<usizex8>[src]

impl Eq for LexicographicallyOrdered<msizex8>[src]

impl<T> Eq for LexicographicallyOrdered<cptrx2<T>>[src]

impl<T> Eq for LexicographicallyOrdered<mptrx2<T>>[src]

impl<T> Eq for LexicographicallyOrdered<cptrx4<T>>[src]

impl<T> Eq for LexicographicallyOrdered<mptrx4<T>>[src]

impl<T> Eq for LexicographicallyOrdered<cptrx8<T>>[src]

impl<T> Eq for LexicographicallyOrdered<mptrx8<T>>[src]

impl Eq for LexicographicallyOrdered<m16x2>[src]

impl Eq for LexicographicallyOrdered<i8x8>[src]

impl Ord for LexicographicallyOrdered<i8x2>[src]

impl Ord for LexicographicallyOrdered<u8x2>[src]

impl Ord for LexicographicallyOrdered<u8x8>[src]

impl Ord for LexicographicallyOrdered<m8x8>[src]

impl Ord for LexicographicallyOrdered<i16x4>[src]

impl Ord for LexicographicallyOrdered<u16x4>[src]

impl Ord for LexicographicallyOrdered<m16x4>[src]

impl Ord for LexicographicallyOrdered<i32x2>[src]

impl Ord for LexicographicallyOrdered<u32x2>[src]

impl Ord for LexicographicallyOrdered<m32x2>[src]

impl Ord for LexicographicallyOrdered<i8x16>[src]

impl Ord for LexicographicallyOrdered<u8x16>[src]

impl Ord for LexicographicallyOrdered<m8x2>[src]

impl Ord for LexicographicallyOrdered<m8x16>[src]

impl Ord for LexicographicallyOrdered<i16x8>[src]

impl Ord for LexicographicallyOrdered<u16x8>[src]

impl Ord for LexicographicallyOrdered<m16x8>[src]

impl Ord for LexicographicallyOrdered<i32x4>[src]

impl Ord for LexicographicallyOrdered<u32x4>[src]

impl Ord for LexicographicallyOrdered<m32x4>[src]

impl Ord for LexicographicallyOrdered<i64x2>[src]

impl Ord for LexicographicallyOrdered<u64x2>[src]

impl Ord for LexicographicallyOrdered<m64x2>[src]

impl Ord for LexicographicallyOrdered<i8x4>[src]

impl Ord for LexicographicallyOrdered<i128x1>[src]

impl Ord for LexicographicallyOrdered<u128x1>[src]

impl Ord for LexicographicallyOrdered<m128x1>[src]

impl Ord for LexicographicallyOrdered<i8x32>[src]

impl Ord for LexicographicallyOrdered<u8x32>[src]

impl Ord for LexicographicallyOrdered<m8x32>[src]

impl Ord for LexicographicallyOrdered<i16x16>[src]

impl Ord for LexicographicallyOrdered<u16x16>[src]

impl Ord for LexicographicallyOrdered<m16x16>[src]

impl Ord for LexicographicallyOrdered<i32x8>[src]

impl Ord for LexicographicallyOrdered<u8x4>[src]

impl Ord for LexicographicallyOrdered<u32x8>[src]

impl Ord for LexicographicallyOrdered<m32x8>[src]

impl Ord for LexicographicallyOrdered<i64x4>[src]

impl Ord for LexicographicallyOrdered<u64x4>[src]

impl Ord for LexicographicallyOrdered<m64x4>[src]

impl Ord for LexicographicallyOrdered<i128x2>[src]

impl Ord for LexicographicallyOrdered<u128x2>[src]

impl Ord for LexicographicallyOrdered<m128x2>[src]

impl Ord for LexicographicallyOrdered<i8x64>[src]

impl Ord for LexicographicallyOrdered<u8x64>[src]

impl Ord for LexicographicallyOrdered<m8x4>[src]

impl Ord for LexicographicallyOrdered<m8x64>[src]

impl Ord for LexicographicallyOrdered<i16x32>[src]

impl Ord for LexicographicallyOrdered<u16x32>[src]

impl Ord for LexicographicallyOrdered<m16x32>[src]

impl Ord for LexicographicallyOrdered<i32x16>[src]

impl Ord for LexicographicallyOrdered<u32x16>[src]

impl Ord for LexicographicallyOrdered<m32x16>[src]

impl Ord for LexicographicallyOrdered<i64x8>[src]

impl Ord for LexicographicallyOrdered<u64x8>[src]

impl Ord for LexicographicallyOrdered<m64x8>[src]

impl Ord for LexicographicallyOrdered<i16x2>[src]

impl Ord for LexicographicallyOrdered<i128x4>[src]

impl Ord for LexicographicallyOrdered<u128x4>[src]

impl Ord for LexicographicallyOrdered<m128x4>[src]

impl Ord for LexicographicallyOrdered<isizex2>[src]

impl Ord for LexicographicallyOrdered<usizex2>[src]

impl Ord for LexicographicallyOrdered<msizex2>[src]

impl Ord for LexicographicallyOrdered<isizex4>[src]

impl Ord for LexicographicallyOrdered<usizex4>[src]

impl Ord for LexicographicallyOrdered<msizex4>[src]

impl Ord for LexicographicallyOrdered<isizex8>[src]

impl Ord for LexicographicallyOrdered<u16x2>[src]

impl Ord for LexicographicallyOrdered<usizex8>[src]

impl Ord for LexicographicallyOrdered<msizex8>[src]

impl Ord for LexicographicallyOrdered<m16x2>[src]

impl Ord for LexicographicallyOrdered<i8x8>[src]

impl<T> PartialEq<LexicographicallyOrdered<Simd<[*const T; 2]>>> for LexicographicallyOrdered<cptrx2<T>>[src]

impl<T> PartialEq<LexicographicallyOrdered<Simd<[*const T; 4]>>> for LexicographicallyOrdered<cptrx4<T>>[src]

impl<T> PartialEq<LexicographicallyOrdered<Simd<[*const T; 8]>>> for LexicographicallyOrdered<cptrx8<T>>[src]

impl<T> PartialEq<LexicographicallyOrdered<Simd<[*mut T; 2]>>> for LexicographicallyOrdered<mptrx2<T>>[src]

impl<T> PartialEq<LexicographicallyOrdered<Simd<[*mut T; 4]>>> for LexicographicallyOrdered<mptrx4<T>>[src]

impl<T> PartialEq<LexicographicallyOrdered<Simd<[*mut T; 8]>>> for LexicographicallyOrdered<mptrx8<T>>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[f32; 16]>>> for LexicographicallyOrdered<f32x16>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[f32; 2]>>> for LexicographicallyOrdered<f32x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[f32; 4]>>> for LexicographicallyOrdered<f32x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[f32; 8]>>> for LexicographicallyOrdered<f32x8>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[f64; 2]>>> for LexicographicallyOrdered<f64x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[f64; 4]>>> for LexicographicallyOrdered<f64x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[f64; 8]>>> for LexicographicallyOrdered<f64x8>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i128; 1]>>> for LexicographicallyOrdered<i128x1>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i128; 2]>>> for LexicographicallyOrdered<i128x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i128; 4]>>> for LexicographicallyOrdered<i128x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i16; 16]>>> for LexicographicallyOrdered<i16x16>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i16; 2]>>> for LexicographicallyOrdered<i16x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i16; 32]>>> for LexicographicallyOrdered<i16x32>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i16; 4]>>> for LexicographicallyOrdered<i16x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i16; 8]>>> for LexicographicallyOrdered<i16x8>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i32; 16]>>> for LexicographicallyOrdered<i32x16>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i32; 2]>>> for LexicographicallyOrdered<i32x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i32; 4]>>> for LexicographicallyOrdered<i32x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i32; 8]>>> for LexicographicallyOrdered<i32x8>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i64; 2]>>> for LexicographicallyOrdered<i64x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i64; 4]>>> for LexicographicallyOrdered<i64x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i64; 8]>>> for LexicographicallyOrdered<i64x8>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i8; 16]>>> for LexicographicallyOrdered<i8x16>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i8; 2]>>> for LexicographicallyOrdered<i8x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i8; 32]>>> for LexicographicallyOrdered<i8x32>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i8; 4]>>> for LexicographicallyOrdered<i8x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i8; 64]>>> for LexicographicallyOrdered<i8x64>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[i8; 8]>>> for LexicographicallyOrdered<i8x8>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[isize; 2]>>> for LexicographicallyOrdered<isizex2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[isize; 4]>>> for LexicographicallyOrdered<isizex4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[isize; 8]>>> for LexicographicallyOrdered<isizex8>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m128; 1]>>> for LexicographicallyOrdered<m128x1>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m128; 2]>>> for LexicographicallyOrdered<m128x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m128; 4]>>> for LexicographicallyOrdered<m128x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m16; 16]>>> for LexicographicallyOrdered<m16x16>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m16; 2]>>> for LexicographicallyOrdered<m16x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m16; 32]>>> for LexicographicallyOrdered<m16x32>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m16; 4]>>> for LexicographicallyOrdered<m16x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m16; 8]>>> for LexicographicallyOrdered<m16x8>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m32; 16]>>> for LexicographicallyOrdered<m32x16>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m32; 2]>>> for LexicographicallyOrdered<m32x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m32; 4]>>> for LexicographicallyOrdered<m32x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m32; 8]>>> for LexicographicallyOrdered<m32x8>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m64; 2]>>> for LexicographicallyOrdered<m64x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m64; 4]>>> for LexicographicallyOrdered<m64x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m64; 8]>>> for LexicographicallyOrdered<m64x8>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m8; 16]>>> for LexicographicallyOrdered<m8x16>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m8; 2]>>> for LexicographicallyOrdered<m8x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m8; 32]>>> for LexicographicallyOrdered<m8x32>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m8; 4]>>> for LexicographicallyOrdered<m8x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m8; 64]>>> for LexicographicallyOrdered<m8x64>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[m8; 8]>>> for LexicographicallyOrdered<m8x8>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[msize; 2]>>> for LexicographicallyOrdered<msizex2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[msize; 4]>>> for LexicographicallyOrdered<msizex4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[msize; 8]>>> for LexicographicallyOrdered<msizex8>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u128; 1]>>> for LexicographicallyOrdered<u128x1>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u128; 2]>>> for LexicographicallyOrdered<u128x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u128; 4]>>> for LexicographicallyOrdered<u128x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u16; 16]>>> for LexicographicallyOrdered<u16x16>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u16; 2]>>> for LexicographicallyOrdered<u16x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u16; 32]>>> for LexicographicallyOrdered<u16x32>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u16; 4]>>> for LexicographicallyOrdered<u16x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u16; 8]>>> for LexicographicallyOrdered<u16x8>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u32; 16]>>> for LexicographicallyOrdered<u32x16>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u32; 2]>>> for LexicographicallyOrdered<u32x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u32; 4]>>> for LexicographicallyOrdered<u32x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u32; 8]>>> for LexicographicallyOrdered<u32x8>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u64; 2]>>> for LexicographicallyOrdered<u64x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u64; 4]>>> for LexicographicallyOrdered<u64x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u64; 8]>>> for LexicographicallyOrdered<u64x8>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u8; 16]>>> for LexicographicallyOrdered<u8x16>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u8; 2]>>> for LexicographicallyOrdered<u8x2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u8; 32]>>> for LexicographicallyOrdered<u8x32>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u8; 4]>>> for LexicographicallyOrdered<u8x4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u8; 64]>>> for LexicographicallyOrdered<u8x64>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[u8; 8]>>> for LexicographicallyOrdered<u8x8>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[usize; 2]>>> for LexicographicallyOrdered<usizex2>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[usize; 4]>>> for LexicographicallyOrdered<usizex4>[src]

impl PartialEq<LexicographicallyOrdered<Simd<[usize; 8]>>> for LexicographicallyOrdered<usizex8>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i128; 1]>>> for LexicographicallyOrdered<i128x1>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i128; 2]>>> for LexicographicallyOrdered<i128x2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i128; 4]>>> for LexicographicallyOrdered<i128x4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i16; 16]>>> for LexicographicallyOrdered<i16x16>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i16; 2]>>> for LexicographicallyOrdered<i16x2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i16; 32]>>> for LexicographicallyOrdered<i16x32>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i16; 4]>>> for LexicographicallyOrdered<i16x4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i16; 8]>>> for LexicographicallyOrdered<i16x8>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i32; 16]>>> for LexicographicallyOrdered<i32x16>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i32; 2]>>> for LexicographicallyOrdered<i32x2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i32; 4]>>> for LexicographicallyOrdered<i32x4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i32; 8]>>> for LexicographicallyOrdered<i32x8>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i64; 2]>>> for LexicographicallyOrdered<i64x2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i64; 4]>>> for LexicographicallyOrdered<i64x4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i64; 8]>>> for LexicographicallyOrdered<i64x8>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i8; 16]>>> for LexicographicallyOrdered<i8x16>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i8; 2]>>> for LexicographicallyOrdered<i8x2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i8; 32]>>> for LexicographicallyOrdered<i8x32>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i8; 4]>>> for LexicographicallyOrdered<i8x4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i8; 64]>>> for LexicographicallyOrdered<i8x64>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[i8; 8]>>> for LexicographicallyOrdered<i8x8>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[isize; 2]>>> for LexicographicallyOrdered<isizex2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[isize; 4]>>> for LexicographicallyOrdered<isizex4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[isize; 8]>>> for LexicographicallyOrdered<isizex8>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m128; 1]>>> for LexicographicallyOrdered<m128x1>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m128; 2]>>> for LexicographicallyOrdered<m128x2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m128; 4]>>> for LexicographicallyOrdered<m128x4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m16; 16]>>> for LexicographicallyOrdered<m16x16>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m16; 2]>>> for LexicographicallyOrdered<m16x2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m16; 32]>>> for LexicographicallyOrdered<m16x32>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m16; 4]>>> for LexicographicallyOrdered<m16x4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m16; 8]>>> for LexicographicallyOrdered<m16x8>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m32; 16]>>> for LexicographicallyOrdered<m32x16>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m32; 2]>>> for LexicographicallyOrdered<m32x2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m32; 4]>>> for LexicographicallyOrdered<m32x4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m32; 8]>>> for LexicographicallyOrdered<m32x8>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m64; 2]>>> for LexicographicallyOrdered<m64x2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m64; 4]>>> for LexicographicallyOrdered<m64x4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m64; 8]>>> for LexicographicallyOrdered<m64x8>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m8; 16]>>> for LexicographicallyOrdered<m8x16>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m8; 2]>>> for LexicographicallyOrdered<m8x2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m8; 32]>>> for LexicographicallyOrdered<m8x32>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m8; 4]>>> for LexicographicallyOrdered<m8x4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m8; 64]>>> for LexicographicallyOrdered<m8x64>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[m8; 8]>>> for LexicographicallyOrdered<m8x8>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[msize; 2]>>> for LexicographicallyOrdered<msizex2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[msize; 4]>>> for LexicographicallyOrdered<msizex4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[msize; 8]>>> for LexicographicallyOrdered<msizex8>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u128; 1]>>> for LexicographicallyOrdered<u128x1>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u128; 2]>>> for LexicographicallyOrdered<u128x2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u128; 4]>>> for LexicographicallyOrdered<u128x4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u16; 16]>>> for LexicographicallyOrdered<u16x16>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u16; 2]>>> for LexicographicallyOrdered<u16x2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u16; 32]>>> for LexicographicallyOrdered<u16x32>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u16; 4]>>> for LexicographicallyOrdered<u16x4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u16; 8]>>> for LexicographicallyOrdered<u16x8>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u32; 16]>>> for LexicographicallyOrdered<u32x16>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u32; 2]>>> for LexicographicallyOrdered<u32x2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u32; 4]>>> for LexicographicallyOrdered<u32x4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u32; 8]>>> for LexicographicallyOrdered<u32x8>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u64; 2]>>> for LexicographicallyOrdered<u64x2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u64; 4]>>> for LexicographicallyOrdered<u64x4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u64; 8]>>> for LexicographicallyOrdered<u64x8>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u8; 16]>>> for LexicographicallyOrdered<u8x16>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u8; 2]>>> for LexicographicallyOrdered<u8x2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u8; 32]>>> for LexicographicallyOrdered<u8x32>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u8; 4]>>> for LexicographicallyOrdered<u8x4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u8; 64]>>> for LexicographicallyOrdered<u8x64>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[u8; 8]>>> for LexicographicallyOrdered<u8x8>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[usize; 2]>>> for LexicographicallyOrdered<usizex2>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[usize; 4]>>> for LexicographicallyOrdered<usizex4>[src]

impl PartialOrd<LexicographicallyOrdered<Simd<[usize; 8]>>> for LexicographicallyOrdered<usizex8>[src]

Auto Trait Implementations

impl<T> Send for LexicographicallyOrdered<T> where
    T: Send

impl<T> Sync for LexicographicallyOrdered<T> where
    T: Sync

impl<T> Unpin for LexicographicallyOrdered<T> where
    T: Unpin

Blanket Implementations

impl<T> Any for T where
    T: 'static + ?Sized
[src]

impl<T> Borrow<T> for T where
    T: ?Sized
[src]

impl<T> BorrowMut<T> for T where
    T: ?Sized
[src]

impl<T> From<T> for T[src]

impl<T, U> Into<U> for T where
    U: From<T>, 
[src]

impl<T, U> TryFrom<U> for T where
    U: Into<T>, 
[src]

type Error = Infallible

The type returned in the event of a conversion error.

+

impl<T, U> TryInto<U> for T where
    U: TryFrom<T>, 
[src]

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.

+
\ No newline at end of file diff --git a/packed_simd/struct.Simd.html b/packed_simd/struct.Simd.html new file mode 100644 index 000000000..03b1b2378 --- /dev/null +++ b/packed_simd/struct.Simd.html @@ -0,0 +1,15270 @@ +packed_simd::Simd - Rust

Struct packed_simd::Simd[src]

#[repr(transparent)]pub struct Simd<A: SimdArray>(_);

Packed SIMD vector type.

+

Examples

+
+let v = Simd::<[i32; 4]>::new(0, 1, 2, 3);
+assert_eq!(v.extract(2), 2);
+

Implementations

impl Simd<[i8; 2]>[src]

pub const fn new(x0: i8, x1: i8) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to +value.

+
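
A minimal construction sketch (an editor's illustration, assuming the packed_simd crate on nightly Rust; i8x2 is the alias for the Simd<[i8; 2]> type documented here, and all values are illustrative):

use packed_simd::i8x2;

// Per-lane construction, broadcast, and lane count.
let a = i8x2::new(1, 2);
let b = i8x2::splat(7);
assert_eq!(i8x2::lanes(), 2);
assert_eq!(b, i8x2::new(7, 7));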

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+
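
A short sketch of extract and replace with illustrative values; note that replace returns a new vector and leaves the original untouched:

use packed_simd::i8x2;

let v = i8x2::new(10, 20);
let w = v.replace(1, 99); // new vector; `v` is unchanged
assert_eq!(v.extract(1), 20);
assert_eq!(w.extract(1), 99);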

impl Simd<[i8; 2]>[src]

pub fn rotate_left(self, n: i8x2) -> i8x2[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: i8x2) -> i8x2[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+
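
For instance, rotating each lane's bits by a per-lane count (a sketch with illustrative values):

use packed_simd::i8x2;

let x = i8x2::new(0b0000_0001, 0b0000_0100);
let n = i8x2::new(1, 2);
// Lane 0 rotates left by 1 bit, lane 1 by 2 bits.
assert_eq!(x.rotate_left(n), i8x2::new(0b0000_0010, 0b0001_0000));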

impl Simd<[i8; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+
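
Both operate element-wise rather than reducing; a brief sketch with illustrative values:

use packed_simd::i8x2;

let a = i8x2::new(1, 9);
let b = i8x2::new(4, 3);
assert_eq!(a.min(b), i8x2::new(1, 3)); // per-lane minimum
assert_eq!(a.max(b), i8x2::new(4, 9)); // per-lane maximum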

impl Simd<[i8; 2]>[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+
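
A sketch of the wrapping behavior with illustrative values; 100 + 100 exceeds i8::MAX and wraps modulo 2^8:

use packed_simd::i8x2;

let v = i8x2::new(100, 100);
// 200 mod 2^8, reinterpreted as i8, is -56.
assert_eq!(v.wrapping_sum(), -56);
assert_eq!(i8x2::new(2, 3).wrapping_product(), 6);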

impl Simd<[i8; 2]>[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

+

pub fn min_element(self) -> i8[src]

Smallest vector element value.

+
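
These are horizontal reductions to a single scalar; a brief sketch:

use packed_simd::i8x2;

let v = i8x2::new(-3, 5);
assert_eq!(v.max_element(), 5);
assert_eq!(v.min_element(), -3);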

impl Simd<[i8; 2]>[src]

pub fn and(self) -> i8[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> i8[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> i8[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+
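
A sketch of the three bitwise reductions over illustrative two-lane values:

use packed_simd::i8x2;

let v = i8x2::new(0b1100, 0b1010);
assert_eq!(v.and(), 0b1000); // 0b1100 & 0b1010
assert_eq!(v.or(),  0b1110); // 0b1100 | 0b1010
assert_eq!(v.xor(), 0b0110); // 0b1100 ^ 0b1010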

impl Simd<[i8; 2]>[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i8; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
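
A slice round-trip through the unaligned variants, which carry no alignment requirement (a sketch with illustrative data):

use packed_simd::i8x2;

let data: [i8; 2] = [3, 4];
let v = i8x2::from_slice_unaligned(&data);
let mut out = [0i8; 2];
v.write_to_slice_unaligned(&mut out);
assert_eq!(out, data);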

impl Simd<[i8; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+
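
A sketch of swap_bytes, under the reading stated above that the byte order of the vector as a whole is reversed; because i8x2 occupies exactly two bytes, that swaps its lanes:

use packed_simd::i8x2;

let v = i8x2::new(1, 2);
assert_eq!(v.swap_bytes(), i8x2::new(2, 1));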

impl Simd<[i8; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+
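
These bit counts also apply per lane; a brief sketch:

use packed_simd::i8x2;

let v = i8x2::new(0b0101, 0b0011);
assert_eq!(v.count_ones(), i8x2::new(2, 2));
// Counted within each 8-bit lane.
assert_eq!(v.leading_zeros(), i8x2::new(5, 6));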

impl Simd<[i8; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[i8; 2]>[src]

pub fn eq(self, other: Self) -> m8x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m8x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m8x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m8x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m8x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m8x2[src]

Lane-wise greater-than-or-equals comparison.

+
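
Each comparison yields a mask vector (here m8x2) with one boolean lane per input lane; a sketch:

use packed_simd::{i8x2, m8x2};

let a = i8x2::new(1, 5);
let b = i8x2::new(2, 5);
assert_eq!(a.lt(b), m8x2::new(true, false));
assert_eq!(a.le(b), m8x2::new(true, true));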

impl Simd<[i8; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x2>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[i8; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x2>[src]

Returns a wrapper that implements Ord.

+
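
The wrappers make whole vectors comparable element by element, much like slices; a sketch:

use packed_simd::i8x2;

let a = i8x2::new(1, 9);
let b = i8x2::new(2, 0);
// Lane 0 decides: 1 < 2, so `a` orders before `b`.
assert!(a.lex_ord() < b.lex_ord());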

impl Simd<[i8; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+
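
A bitmask sketch, assuming lane 0 maps to the least-significant bit of the result; a negative i8 lane has its MSB set:

use packed_simd::i8x2;

let v = i8x2::new(-1, 1);
assert_eq!(v.bitmask(), 0b01);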

impl Simd<[u8; 2]>[src]

pub const fn new(x0: u8, x1: u8) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[u8; 2]>[src]

pub fn rotate_left(self, n: u8x2) -> u8x2[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: u8x2) -> u8x2[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl Simd<[u8; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl Simd<[u8; 2]>[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

impl Simd<[u8; 2]>[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

+

pub fn min_element(self) -> u8[src]

Smallest vector element value.

+

impl Simd<[u8; 2]>[src]

pub fn and(self) -> u8[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> u8[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> u8[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl Simd<[u8; 2]>[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u8; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u8; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl Simd<[u8; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl Simd<[u8; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[u8; 2]>[src]

pub fn eq(self, other: Self) -> m8x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m8x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m8x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m8x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m8x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m8x2[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[u8; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x2>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[u8; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x2>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[u8; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[m8; 2]>[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[m8; 2]>[src]

pub fn and(self) -> bool[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> bool[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> bool[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl Simd<[m8; 2]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+
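
A brief sketch of the three boolean reductions on an illustrative mask:

use packed_simd::m8x2;

let m = m8x2::new(true, false);
assert!(m.any());
assert!(!m.all());
assert!(!m.none());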

impl Simd<[m8; 2]>[src]

pub fn eq(self, other: Self) -> m8x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m8x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m8x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m8x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m8x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m8x2[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m8; 2]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 2] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain +the values of a. The remaining lanes contain the values of +b.

+
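
A select sketch with illustrative values; true lanes come from a and false lanes from b:

use packed_simd::{i8x2, m8x2};

let mask = m8x2::new(true, false);
let a = i8x2::new(1, 2);
let b = i8x2::new(10, 20);
assert_eq!(mask.select(a, b), i8x2::new(1, 20));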

impl Simd<[m8; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x2>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[m8; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x2>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[m8; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[m8; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[i8; 4]>[src]

pub const fn new(x0: i8, x1: i8, x2: i8, x3: i8) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[i8; 4]>[src]

pub fn rotate_left(self, n: i8x4) -> i8x4[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: i8x4) -> i8x4[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl Simd<[i8; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl Simd<[i8; 4]>[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

impl Simd<[i8; 4]>[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

+

pub fn min_element(self) -> i8[src]

Smallest vector element value.

+

impl Simd<[i8; 4]>[src]

pub fn and(self) -> i8[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> i8[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> i8[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl Simd<[i8; 4]>[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i8; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i8; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl Simd<[i8; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl Simd<[i8; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[i8; 4]>[src]

pub fn eq(self, other: Self) -> m8x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m8x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m8x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m8x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m8x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m8x4[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[i8; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[i8; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x4>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[i8; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[u8; 4]>[src]

pub const fn new(x0: u8, x1: u8, x2: u8, x3: u8) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[u8; 4]>[src]

pub fn rotate_left(self, n: u8x4) -> u8x4[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: u8x4) -> u8x4[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl Simd<[u8; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl Simd<[u8; 4]>[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

impl Simd<[u8; 4]>[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

+

pub fn min_element(self) -> u8[src]

Smallest vector element value.

+

impl Simd<[u8; 4]>[src]

pub fn and(self) -> u8[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> u8[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> u8[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+
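These are horizontal reductions: each folds all lanes into a single scalar. A sketch:

use packed_simd::u8x4;

fn main() {
    let v = u8x4::new(0b1100, 0b1010, 0b1001, 0b1111);

    assert_eq!(v.and(), 0b1000); // bits set in every lane
    assert_eq!(v.or(),  0b1111); // bits set in any lane
    assert_eq!(v.xor(), 0b0000); // per-bit parity across the lanes
}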

impl Simd<[u8; 4]>[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.
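The unaligned variant is the safe default for arbitrary slices; the aligned variants additionally require &slice[0] to sit on an align_of::<Self>() boundary. Sketch:

use packed_simd::u8x4;

fn main() {
    let data = [1u8, 2, 3, 4, 5, 6];

    // Works at any starting offset; alignment is not required.
    let v = u8x4::from_slice_unaligned(&data[1..5]);
    assert_eq!(v, u8x4::new(2, 3, 4, 5));
}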

impl Simd<[u8; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.
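Stores mirror loads; write_to_slice_unaligned only needs enough room, not alignment:

use packed_simd::u8x4;

fn main() {
    let v = u8x4::new(9, 8, 7, 6);
    let mut out = [0u8; 6];

    // Writes the four lanes into out[1..5]; the rest is untouched.
    v.write_to_slice_unaligned(&mut out[1..5]);
    assert_eq!(out, [0, 9, 8, 7, 6, 0]);
}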

impl Simd<[u8; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.
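A sketch of the byte-order helpers. Note that for single-byte lanes, reversing the vector's byte order amounts to reversing the lane order:

use packed_simd::u8x4;

fn main() {
    let v = u8x4::new(1, 2, 3, 4);

    assert_eq!(v.swap_bytes(), u8x4::new(4, 3, 2, 1));

    // On a little-endian target to_le is the identity.
    if cfg!(target_endian = "little") {
        assert_eq!(v.to_le(), v);
    }
}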

impl Simd<[u8; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.
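The bit-counting operations are per lane and return a vector of counts rather than a scalar:

use packed_simd::u8x4;

fn main() {
    let v = u8x4::new(0b0000_0000, 0b0000_1000, 0b1111_1111, 0b0101_0101);

    assert_eq!(v.count_ones(),     u8x4::new(0, 1, 8, 4));
    assert_eq!(v.leading_zeros(),  u8x4::new(8, 4, 0, 1));
    assert_eq!(v.trailing_zeros(), u8x4::new(8, 3, 0, 0));
}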

impl Simd<[u8; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.


impl Simd<[u8; 4]>[src]

pub fn eq(self, other: Self) -> m8x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x4[src]

Lane-wise greater-than-or-equals comparison.
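Each comparison yields a mask vector (m8x4 here) with one boolean per lane, which can then drive select or bitmask. Sketch:

use packed_simd::{m8x4, u8x4};

fn main() {
    let a = u8x4::new(1, 5, 3, 9);
    let b = u8x4::new(2, 5, 1, 9);

    assert_eq!(a.lt(b), m8x4::new(true, false, false, false));
    assert_eq!(a.eq(b), m8x4::new(false, true, false, true));
}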

impl Simd<[u8; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u8; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[u8; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.
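A sketch, assuming the crate's convention that lane 0 maps to the least significant bit:

use packed_simd::u8x4;

fn main() {
    let a = u8x4::new(1, 5, 3, 9);
    let b = u8x4::new(2, 5, 1, 9);

    // Only lane 0 satisfies a < b, so only bit 0 is set; bits 4..8
    // correspond to no lane and stay cleared.
    assert_eq!(a.lt(b).bitmask(), 0b0001);
}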

impl Simd<[m8; 4]>[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m8; 4]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m8; 4]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?
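A sketch of the mask reductions:

use packed_simd::m8x4;

fn main() {
    let m = m8x4::new(true, false, true, false);

    assert!(!m.all());
    assert!(m.any());
    assert!(!m.none());
    assert!(m8x4::splat(false).none());
}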

impl Simd<[m8; 4]>[src]

pub fn eq(self, other: Self) -> m8x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m8; 4]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 4] as SimdArray>::NT>,
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.
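select is the lane-wise conditional; the mask picks, per lane, between two vectors with the same number of lanes. Sketch:

use packed_simd::{m8x4, u8x4};

fn main() {
    let m = m8x4::new(true, false, true, false);
    let a = u8x4::splat(1);
    let b = u8x4::splat(0);

    // True lanes take their value from a, false lanes from b.
    assert_eq!(m.select(a, b), u8x4::new(1, 0, 1, 0));
}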

impl Simd<[m8; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m8; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[m8; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[m8; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[i16; 2]>[src]

pub const fn new(x0: i16, x1: i16) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i16) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i16[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i16[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i16; 2]>[src]

pub fn rotate_left(self, n: i16x2) -> i16x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i16x2) -> i16x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i16; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i16; 2]>[src]

pub fn wrapping_sum(self) -> i16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> i16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl Simd<[i16; 2]>[src]

pub fn max_element(self) -> i16[src]

Largest vector element value.

pub fn min_element(self) -> i16[src]

Smallest vector element value.

impl Simd<[i16; 2]>[src]

pub fn and(self) -> i16[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i16[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i16[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i16; 2]>[src]

pub fn from_slice_aligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i16; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i16; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i16; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i16; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[i16; 2]>[src]

pub fn eq(self, other: Self) -> m16x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i16; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i16x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i16; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i16x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[i16; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.
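The same API instantiated for signed lanes; a sketch showing that min/max and the wrapping reductions follow i16 semantics:

use packed_simd::i16x2;

fn main() {
    let a = i16x2::new(-3, 7);
    let b = i16x2::new(2, -7);

    assert_eq!(a.min(b), i16x2::new(-3, -7));
    assert_eq!(a.max(b), i16x2::new(2, 7));

    // 32767 + 1 wraps around to -32768 in i16 arithmetic.
    assert_eq!(i16x2::new(i16::MAX, 1).wrapping_sum(), i16::MIN);
}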

impl Simd<[u16; 2]>[src]

pub const fn new(x0: u16, x1: u16) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u16) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u16[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u16[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u16; 2]>[src]

pub fn rotate_left(self, n: u16x2) -> u16x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u16x2) -> u16x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u16; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u16; 2]>[src]

pub fn wrapping_sum(self) -> u16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> u16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl Simd<[u16; 2]>[src]

pub fn max_element(self) -> u16[src]

Largest vector element value.

pub fn min_element(self) -> u16[src]

Smallest vector element value.

impl Simd<[u16; 2]>[src]

pub fn and(self) -> u16[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u16[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u16[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u16; 2]>[src]

pub fn from_slice_aligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u16; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u16; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u16; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u16; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[u16; 2]>[src]

pub fn eq(self, other: Self) -> m16x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u16; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u16x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u16; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u16x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[u16; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[m16; 2]>[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m16; 2]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m16; 2]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m16; 2]>[src]

pub fn eq(self, other: Self) -> m16x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m16; 2]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m16; 2] as SimdArray>::NT>,
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m16; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m16x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m16; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m16x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[m16; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[m16; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[i8; 8]>[src]

pub const fn new(
    x0: i8,
    x1: i8,
    x2: i8,
    x3: i8,
    x4: i8,
    x5: i8,
    x6: i8,
    x7: i8
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i8; 8]>[src]

pub fn rotate_left(self, n: i8x8) -> i8x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i8x8) -> i8x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i8; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i8; 8]>[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl Simd<[i8; 8]>[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

pub fn min_element(self) -> i8[src]

Smallest vector element value.

impl Simd<[i8; 8]>[src]

pub fn and(self) -> i8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i8; 8]>[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i8; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i8; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i8; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i8; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[i8; 8]>[src]

pub fn eq(self, other: Self) -> m8x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i8; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i8; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x8>[src]

Returns a wrapper that implements Ord.

impl Simd<[i8; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[u8; 8]>[src]

pub const fn new(
    x0: u8,
    x1: u8,
    x2: u8,
    x3: u8,
    x4: u8,
    x5: u8,
    x6: u8,
    x7: u8
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u8; 8]>[src]

pub fn rotate_left(self, n: u8x8) -> u8x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u8x8) -> u8x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u8; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u8; 8]>[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl Simd<[u8; 8]>[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

pub fn min_element(self) -> u8[src]

Smallest vector element value.

impl Simd<[u8; 8]>[src]

pub fn and(self) -> u8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u8; 8]>[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u8; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u8; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u8; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u8; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[u8; 8]>[src]

pub fn eq(self, other: Self) -> m8x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u8; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u8; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x8>[src]

Returns a wrapper that implements Ord.

impl Simd<[u8; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[m8; 8]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m8; 8]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m8; 8]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m8; 8]>[src]

pub fn eq(self, other: Self) -> m8x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x8[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m8; 8]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 8] as SimdArray>::NT>,
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m8; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x8>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m8; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x8>[src]

Returns a wrapper that implements Ord.

impl Simd<[m8; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[m8; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.
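With eight lanes the bitmask fills the whole u8; assuming lane 0 maps to bit 0:

use packed_simd::m8x8;

fn main() {
    let m = m8x8::new(true, false, false, false, false, false, false, true);

    // Lane 0 -> bit 0, lane 7 -> bit 7.
    assert_eq!(m.bitmask(), 0b1000_0001);
}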

impl Simd<[i16; 4]>[src]

pub const fn new(x0: i16, x1: i16, x2: i16, x3: i16) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i16) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i16[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i16[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i16; 4]>[src]

pub fn rotate_left(self, n: i16x4) -> i16x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i16x4) -> i16x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i16; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i16; 4]>[src]

pub fn wrapping_sum(self) -> i16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> i16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl Simd<[i16; 4]>[src]

pub fn max_element(self) -> i16[src]

Largest vector element value.

pub fn min_element(self) -> i16[src]

Smallest vector element value.

impl Simd<[i16; 4]>[src]

pub fn and(self) -> i16[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i16[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i16[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i16; 4]>[src]

pub fn from_slice_aligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i16; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i16; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i16; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i16; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[i16; 4]>[src]

pub fn eq(self, other: Self) -> m16x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i16; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i16x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i16; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i16x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[i16; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[u16; 4]>

pub const fn new(x0: u16, x1: u16, x2: u16, x3: u16) -> Self

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize

Returns the number of vector lanes.

pub const fn splat(value: u16) -> Self

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u16

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u16

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u16) -> Self

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u16) -> Self

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.
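A minimal sketch of construction and element access (illustrative values, assuming the packed_simd crate):

use packed_simd::u16x4;

let v = u16x4::new(10, 20, 30, 40);
assert_eq!(u16x4::lanes(), 4);
assert_eq!(v.extract(2), 30);
// replace returns a new vector; v itself is unchanged.
let w = v.replace(2, 99);
assert_eq!(w.extract(2), 99);
assert_eq!(v.extract(2), 30);
assert_eq!(u16x4::splat(7), u16x4::new(7, 7, 7, 7));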

impl Simd<[u16; 4]>

pub fn rotate_left(self, n: u16x4) -> u16x4

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u16x4) -> u16x4

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
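Unlike a shift, bits rotated out of one end reappear at the other (illustrative sketch assuming the packed_simd crate):

use packed_simd::u16x4;

let v = u16x4::splat(0b1000_0000_0000_0001);
assert_eq!(
    v.rotate_left(u16x4::splat(1)),
    u16x4::splat(0b0000_0000_0000_0011)
);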

impl Simd<[u16; 4]>

pub fn min(self, x: Self) -> Self

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.
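min and max operate lane by lane, not on whole vectors (illustrative sketch assuming the packed_simd crate):

use packed_simd::u16x4;

let a = u16x4::new(1, 5, 3, 7);
let b = u16x4::new(2, 4, 6, 0);
assert_eq!(a.min(b), u16x4::new(1, 4, 3, 0));
assert_eq!(a.max(b), u16x4::new(2, 5, 6, 7));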

impl Simd<[u16; 4]>

pub fn wrapping_sum(self) -> u16

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

pub fn wrapping_product(self) -> u16

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.
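A sketch of the wrapping behavior (illustrative values, assuming the packed_simd crate):

use packed_simd::u16x4;

// 65535 + 1 wraps to 0 modulo 2^16.
let v = u16x4::new(u16::MAX, 1, 0, 0);
assert_eq!(v.wrapping_sum(), 0);
assert_eq!(u16x4::new(2, 3, 4, 5).wrapping_product(), 120);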

impl Simd<[u16; 4]>

pub fn max_element(self) -> u16

Largest vector element value.

pub fn min_element(self) -> u16

Smallest vector element value.
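For example (illustrative values, assuming the packed_simd crate):

use packed_simd::u16x4;

let v = u16x4::new(3, 9, 1, 4);
assert_eq!(v.max_element(), 9);
assert_eq!(v.min_element(), 1);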

impl Simd<[u16; 4]>

pub fn and(self) -> u16

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u16

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u16

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.
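These are reductions that fold the bitwise operation across all lanes (illustrative sketch assuming the packed_simd crate):

use packed_simd::u16x4;

let v = u16x4::new(0b1110, 0b1100, 0b1101, 0b1111);
assert_eq!(v.and(), 0b1100);
assert_eq!(v.or(), 0b1111);
assert_eq!(v.xor(), 0b0000);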

impl Simd<[u16; 4]>

pub fn from_slice_aligned(slice: &[u16]) -> Self

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u16]) -> Self

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u16]) -> Self

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u16]) -> Self

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.
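A sketch of loading from a slice; the unaligned variant only requires enough elements, not any particular alignment (illustrative values, assuming the packed_simd crate):

use packed_simd::u16x4;

let data = [1u16, 2, 3, 4, 5];
// Reads the first u16x4::lanes() elements.
let v = u16x4::from_slice_unaligned(&data);
assert_eq!(v, u16x4::new(1, 2, 3, 4));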

impl Simd<[u16; 4]>

pub fn write_to_slice_aligned(self, slice: &mut [u16])

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u16])

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u16])

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u16])

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.
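And the corresponding store (illustrative values, assuming the packed_simd crate):

use packed_simd::u16x4;

let v = u16x4::new(1, 2, 3, 4);
let mut out = [0u16; 5];
// Only the first u16x4::lanes() elements are written.
v.write_to_slice_unaligned(&mut out);
assert_eq!(out, [1, 2, 3, 4, 0]);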

impl Simd<[u16; 4]>

pub fn swap_bytes(self) -> Self

Reverses the byte order of the vector.

pub fn to_le(self) -> Self

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u16; 4]>

pub fn count_ones(self) -> Self

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u16; 4]>

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,

Shuffle vector elements according to indices.

impl Simd<[u16; 4]>

pub fn eq(self, other: Self) -> m16x4

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x4

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x4

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x4

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x4

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x4

Lane-wise greater-than-or-equals comparison.

impl Simd<[u16; 4]>

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u16x4>

Returns a wrapper that implements PartialOrd.

impl Simd<[u16; 4]>

pub fn lex_ord(&self) -> LexicographicallyOrdered<u16x4>

Returns a wrapper that implements Ord.

impl Simd<[u16; 4]>

pub fn bitmask(self) -> u8

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[m16; 4]>

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m16; 4]>

pub fn and(self) -> bool

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m16; 4]>

pub fn all(self) -> bool

Are all vector lanes true?

pub fn any(self) -> bool

Is any vector lane true?

pub fn none(self) -> bool

Are all vector lanes false?
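A sketch of the mask reductions (illustrative values, assuming the packed_simd crate):

use packed_simd::m16x4;

let m = m16x4::new(true, true, false, true);
assert!(m.any());
assert!(!m.all());
assert!(!m.none());
assert!(m16x4::splat(false).none());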

impl Simd<[m16; 4]>

pub fn eq(self, other: Self) -> m16x4

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x4

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x4

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x4

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x4

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x4

Lane-wise greater-than-or-equals comparison.

impl Simd<[m16; 4]>

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m16; 4] as SimdArray>::NT>,

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.
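A select sketch; it assumes i16x4 as the selected type, which has the same lane shape as m16x4 (illustrative values, assuming the packed_simd crate):

use packed_simd::{i16x4, m16x4};

let m = m16x4::new(true, false, true, false);
let a = i16x4::splat(1);
let b = i16x4::splat(2);
// true lanes take a's value, false lanes take b's.
assert_eq!(m.select(a, b), i16x4::new(1, 2, 1, 2));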

impl Simd<[m16; 4]>

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m16x4>

Returns a wrapper that implements PartialOrd.

impl Simd<[m16; 4]>

pub fn lex_ord(&self) -> LexicographicallyOrdered<m16x4>

Returns a wrapper that implements Ord.

impl Simd<[m16; 4]>

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,

Shuffle vector elements according to indices.

impl Simd<[m16; 4]>

pub fn bitmask(self) -> u8

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[i32; 2]>

pub const fn new(x0: i32, x1: i32) -> Self

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize

Returns the number of vector lanes.

pub const fn splat(value: i32) -> Self

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i32

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i32

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i32) -> Self

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i32) -> Self

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i32; 2]>

pub fn rotate_left(self, n: i32x2) -> i32x2

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i32x2) -> i32x2

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i32; 2]>

pub fn min(self, x: Self) -> Self

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i32; 2]>

pub fn wrapping_sum(self) -> i32

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

pub fn wrapping_product(self) -> i32

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

impl Simd<[i32; 2]>

pub fn max_element(self) -> i32

Largest vector element value.

pub fn min_element(self) -> i32

Smallest vector element value.

impl Simd<[i32; 2]>

pub fn and(self) -> i32

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i32

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i32

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i32; 2]>

pub fn from_slice_aligned(slice: &[i32]) -> Self

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i32]) -> Self

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i32]) -> Self

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i32]) -> Self

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i32; 2]>

pub fn write_to_slice_aligned(self, slice: &mut [i32])

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i32])

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i32])

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i32])

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i32; 2]>

pub fn swap_bytes(self) -> Self

Reverses the byte order of the vector.

pub fn to_le(self) -> Self

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i32; 2]>

pub fn count_ones(self) -> Self

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i32; 2]>

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,

Shuffle vector elements according to indices.

impl Simd<[i32; 2]>

pub fn eq(self, other: Self) -> m32x2

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x2

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x2

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x2

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x2

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x2

Lane-wise greater-than-or-equals comparison.

impl Simd<[i32; 2]>

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i32x2>

Returns a wrapper that implements PartialOrd.

impl Simd<[i32; 2]>

pub fn lex_ord(&self) -> LexicographicallyOrdered<i32x2>

Returns a wrapper that implements Ord.

impl Simd<[i32; 2]>

pub fn bitmask(self) -> u8

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[u32; 2]>

pub const fn new(x0: u32, x1: u32) -> Self

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize

Returns the number of vector lanes.

pub const fn splat(value: u32) -> Self

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u32

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u32

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u32) -> Self

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u32) -> Self

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u32; 2]>

pub fn rotate_left(self, n: u32x2) -> u32x2

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u32x2) -> u32x2

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u32; 2]>

pub fn min(self, x: Self) -> Self

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u32; 2]>

pub fn wrapping_sum(self) -> u32

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

pub fn wrapping_product(self) -> u32

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

impl Simd<[u32; 2]>

pub fn max_element(self) -> u32

Largest vector element value.

pub fn min_element(self) -> u32

Smallest vector element value.

impl Simd<[u32; 2]>

pub fn and(self) -> u32

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u32

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u32

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u32; 2]>

pub fn from_slice_aligned(slice: &[u32]) -> Self

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u32]) -> Self

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u32]) -> Self

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u32]) -> Self

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u32; 2]>

pub fn write_to_slice_aligned(self, slice: &mut [u32])

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u32])

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u32])

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u32])

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u32; 2]>

pub fn swap_bytes(self) -> Self

Reverses the byte order of the vector.

pub fn to_le(self) -> Self

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u32; 2]>

pub fn count_ones(self) -> Self

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u32; 2]>

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,

Shuffle vector elements according to indices.

impl Simd<[u32; 2]>

pub fn eq(self, other: Self) -> m32x2

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x2

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x2

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x2

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x2

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x2

Lane-wise greater-than-or-equals comparison.

impl Simd<[u32; 2]>

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u32x2>

Returns a wrapper that implements PartialOrd.

impl Simd<[u32; 2]>

pub fn lex_ord(&self) -> LexicographicallyOrdered<u32x2>

Returns a wrapper that implements Ord.

impl Simd<[u32; 2]>

pub fn bitmask(self) -> u8

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[m32; 2]>

pub const fn new(x0: bool, x1: bool) -> Self

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m32; 2]>

pub fn and(self) -> bool

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m32; 2]>

pub fn all(self) -> bool

Are all vector lanes true?

pub fn any(self) -> bool

Is any vector lane true?

pub fn none(self) -> bool

Are all vector lanes false?

impl Simd<[m32; 2]>

pub fn eq(self, other: Self) -> m32x2

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x2

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x2

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x2

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x2

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x2

Lane-wise greater-than-or-equals comparison.

impl Simd<[m32; 2]>

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m32; 2] as SimdArray>::NT>,

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m32; 2]>

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m32x2>

Returns a wrapper that implements PartialOrd.

impl Simd<[m32; 2]>

pub fn lex_ord(&self) -> LexicographicallyOrdered<m32x2>

Returns a wrapper that implements Ord.

impl Simd<[m32; 2]>

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,

Shuffle vector elements according to indices.

impl Simd<[m32; 2]>

pub fn bitmask(self) -> u8

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[f32; 2]>

pub const fn new(x0: f32, x1: f32) -> Self

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize

Returns the number of vector lanes.

pub const fn splat(value: f32) -> Self

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> f32

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> f32

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: f32) -> Self

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: f32) -> Self

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[f32; 2]>

pub fn min(self, x: Self) -> Self

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[f32; 2]>

pub fn sum(self) -> f32

Horizontal sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

pub fn product(self) -> f32

Horizontal product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.
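A sketch of the floating-point reductions, including the NaN-propagation rule (illustrative values, assuming the packed_simd crate):

use packed_simd::f32x2;

assert_eq!(f32x2::new(1.5, 2.0).sum(), 3.5);
assert_eq!(f32x2::new(1.5, 2.0).product(), 3.0);
// Any NaN lane makes the whole reduction NaN.
assert!(f32x2::new(1.0, f32::NAN).sum().is_nan());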

impl Simd<[f32; 2]>

pub fn max_element(self) -> f32

Largest vector element value.

pub fn min_element(self) -> f32

Smallest vector element value.

impl Simd<[f32; 2]>

pub fn from_slice_aligned(slice: &[f32]) -> Self

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[f32]) -> Self

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[f32]) -> Self

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f32]) -> Self

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f32; 2]>

pub fn write_to_slice_aligned(self, slice: &mut [f32])

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [f32])

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f32])

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f32])

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f32; 2]>

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,

Shuffle vector elements according to indices.

impl Simd<[f32; 2]>

pub const EPSILON: f32x2

Machine epsilon value.

pub const MIN: f32x2

Smallest finite value.

pub const MIN_POSITIVE: f32x2

Smallest positive normal value.

pub const MAX: f32x2

Largest finite value.

pub const NAN: f32x2

Not a Number (NaN).

pub const INFINITY: f32x2

Infinity (∞).

pub const NEG_INFINITY: f32x2

Negative infinity (-∞).

pub const PI: f32x2

Archimedes' constant (π)

pub const FRAC_PI_2: f32x2

π/2

pub const FRAC_PI_3: f32x2

π/3

pub const FRAC_PI_4: f32x2

π/4

pub const FRAC_PI_6: f32x2

π/6

pub const FRAC_PI_8: f32x2

π/8

pub const FRAC_1_PI: f32x2

1/π

pub const FRAC_2_PI: f32x2

2/π

pub const FRAC_2_SQRT_PI: f32x2

2/sqrt(π)

pub const SQRT_2: f32x2

sqrt(2)

pub const FRAC_1_SQRT_2: f32x2

1/sqrt(2)

pub const E: f32x2

Euler's number (e)

pub const LOG2_E: f32x2

log2(e)

pub const LOG10_E: f32x2

log10(e)

pub const LN_2: f32x2

ln(2)

pub const LN_10: f32x2

ln(10)

impl Simd<[f32; 2]>

pub fn is_nan(self) -> m32x2

pub fn is_infinite(self) -> m32x2

pub fn is_finite(self) -> m32x2

impl Simd<[f32; 2]>

pub fn abs(self) -> Self

Absolute value.

impl Simd<[f32; 2]>

pub fn cos(self) -> Self

Cosine.

pub fn cos_pi(self) -> Self

Cosine of self * PI.

impl Simd<[f32; 2]>

pub fn exp(self) -> Self

Returns the exponential function of self: e^(self).

impl Simd<[f32; 2]>

pub fn ln(self) -> Self

Returns the natural logarithm of self.

impl Simd<[f32; 2]>

pub fn mul_add(self, y: Self, z: Self) -> Self

Fused multiply add: self * y + z

impl Simd<[f32; 2]>

pub fn mul_adde(self, y: Self, z: Self) -> Self

Fused multiply add estimate: ~= self * y + z

While fused multiply-add (fma) computes the intermediate product with infinite precision, mul_adde has at worst the same precision as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.
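A sketch of both variants; since mul_adde may be fused or unfused, general code should compare its result with a tolerance (illustrative values, assuming the packed_simd crate):

use packed_simd::f32x2;

let x = f32x2::splat(2.0);
let y = f32x2::splat(3.0);
let z = f32x2::splat(1.0);
// 2 * 3 + 1 = 7 in each lane, computed with a single rounding.
assert_eq!(x.mul_add(y, z), f32x2::splat(7.0));
// mul_adde: bound the error instead of demanding exact equality.
let e = x.mul_adde(y, z) - f32x2::splat(7.0);
assert!(e.abs().le(f32x2::splat(1e-6)).all());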

impl Simd<[f32; 2]>

pub fn powf(self, x: Self) -> Self

Raises self to the floating-point power of x.
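powf is a transcendental approximation, so a comparison with a tolerance is the safer sketch (illustrative values, assuming the packed_simd crate):

use packed_simd::f32x2;

let p = f32x2::splat(2.0).powf(f32x2::splat(3.0));
// Each lane should be very close to 2^3 = 8.
let err = p - f32x2::splat(8.0);
assert!(err.abs().le(f32x2::splat(1e-5)).all());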

impl Simd<[f32; 2]>

pub fn recpre(self) -> Self

Reciprocal estimate: ~= 1. / self.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f32; 2]>

pub fn rsqrte(self) -> Self

Reciprocal square-root estimate: ~= 1. / self.sqrt().

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f32; 2]>

pub fn sin(self) -> Self

Sine.

pub fn sin_pi(self) -> Self

Sine of self * PI.

pub fn sin_cos_pi(self) -> (Self, Self)

Sine and cosine of self * PI.

impl Simd<[f32; 2]>

pub fn sqrt(self) -> Self

impl Simd<[f32; 2]>

pub fn sqrte(self) -> Self

Square-root estimate.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f32; 2]>

pub fn tanh(self) -> Self

Tanh.

impl Simd<[f32; 2]>

pub fn eq(self, other: Self) -> m32x2

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x2

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x2

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x2

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x2

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x2

Lane-wise greater-than-or-equals comparison.

impl Simd<[i8; 16]>

pub const fn new(
    x0: i8,
    x1: i8,
    x2: i8,
    x3: i8,
    x4: i8,
    x5: i8,
    x6: i8,
    x7: i8,
    x8: i8,
    x9: i8,
    x10: i8,
    x11: i8,
    x12: i8,
    x13: i8,
    x14: i8,
    x15: i8
) -> Self

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize

Returns the number of vector lanes.

pub const fn splat(value: i8) -> Self

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i8

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i8

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i8) -> Self

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i8; 16]>

pub fn rotate_left(self, n: i8x16) -> i8x16

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i8x16) -> i8x16

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i8; 16]>

pub fn min(self, x: Self) -> Self

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i8; 16]>

pub fn wrapping_sum(self) -> i8

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

pub fn wrapping_product(self) -> i8

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

impl Simd<[i8; 16]>

pub fn max_element(self) -> i8

Largest vector element value.

pub fn min_element(self) -> i8

Smallest vector element value.

impl Simd<[i8; 16]>

pub fn and(self) -> i8

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i8

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i8

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i8; 16]>

pub fn from_slice_aligned(slice: &[i8]) -> Self

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i8]) -> Self

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i8; 16]>

pub fn write_to_slice_aligned(self, slice: &mut [i8])

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i8])

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i8; 16]>

pub fn swap_bytes(self) -> Self

Reverses the byte order of the vector.

pub fn to_le(self) -> Self

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i8; 16]>

pub fn count_ones(self) -> Self

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i8; 16]>

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,

Shuffle vector elements according to indices.

impl Simd<[i8; 16]>

pub fn eq(self, other: Self) -> m8x16

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x16

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x16

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x16

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x16

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x16

Lane-wise greater-than-or-equals comparison.

impl Simd<[i8; 16]>

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x16>

Returns a wrapper that implements PartialOrd.

impl Simd<[i8; 16]>

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x16>

Returns a wrapper that implements Ord.

impl Simd<[i8; 16]>

pub fn bitmask(self) -> u16

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

impl Simd<[u8; 16]>

pub const fn new(
    x0: u8,
    x1: u8,
    x2: u8,
    x3: u8,
    x4: u8,
    x5: u8,
    x6: u8,
    x7: u8,
    x8: u8,
    x9: u8,
    x10: u8,
    x11: u8,
    x12: u8,
    x13: u8,
    x14: u8,
    x15: u8
) -> Self

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize

Returns the number of vector lanes.

pub const fn splat(value: u8) -> Self

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u8

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u8

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u8) -> Self

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u8; 16]>

pub fn rotate_left(self, n: u8x16) -> u8x16

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u8x16) -> u8x16

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u8; 16]>

pub fn min(self, x: Self) -> Self

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u8; 16]>

pub fn wrapping_sum(self) -> u8

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

pub fn wrapping_product(self) -> u8

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

impl Simd<[u8; 16]>

pub fn max_element(self) -> u8

Largest vector element value.

pub fn min_element(self) -> u8

Smallest vector element value.

impl Simd<[u8; 16]>

pub fn and(self) -> u8

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u8

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u8

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u8; 16]>

pub fn from_slice_aligned(slice: &[u8]) -> Self

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u8]) -> Self

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u8; 16]>

pub fn write_to_slice_aligned(self, slice: &mut [u8])

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u8])

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u8; 16]>

pub fn swap_bytes(self) -> Self

Reverses the byte order of the vector.

pub fn to_le(self) -> Self

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u8; 16]>

pub fn count_ones(self) -> Self

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u8; 16]>

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,

Shuffle vector elements according to indices.

impl Simd<[u8; 16]>

pub fn eq(self, other: Self) -> m8x16

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x16

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x16

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x16

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x16

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x16

Lane-wise greater-than-or-equals comparison.

impl Simd<[u8; 16]>

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x16>

Returns a wrapper that implements PartialOrd.

impl Simd<[u8; 16]>

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x16>

Returns a wrapper that implements Ord.

impl Simd<[u8; 16]>

pub fn bitmask(self) -> u16

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

impl Simd<[m8; 16]>

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool
) -> Self

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m8; 16]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m8; 16]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

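For example (a minimal sketch, assuming the packed_simd crate):

use packed_simd::m8x16;

fn main() {
    let m = m8x16::splat(true).replace(0, false);
    assert!(!m.all());  // lane 0 is false
    assert!(m.any());   // the other lanes are true
    assert!(!m.none()); // at least one lane is true
}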

impl Simd<[m8; 16]>[src]

pub fn eq(self, other: Self) -> m8x16[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m8x16[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m8x16[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m8x16[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m8x16[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m8x16[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m8; 16]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 16] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.
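
For example, blending two vectors lane by lane (a minimal sketch, assuming the packed_simd crate):

use packed_simd::{m8x16, u8x16};

fn main() {
    let m = m8x16::splat(false).replace(0, true);
    let a = u8x16::splat(1);
    let b = u8x16::splat(2);
    let r = m.select(a, b); // lane 0 comes from a, the rest from b
    assert_eq!(r.extract(0), 1);
    assert_eq!(r.extract(1), 2);
}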

impl Simd<[m8; 16]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x16>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[m8; 16]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x16>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[m8; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[m8; 16]>[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[i16; 8]>[src]

pub const fn new(
    x0: i16,
    x1: i16,
    x2: i16,
    x3: i16,
    x4: i16,
    x5: i16,
    x6: i16,
    x7: i16
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i16) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i16[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i16[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[i16; 8]>[src]

pub fn rotate_left(self, n: i16x8) -> i16x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i16x8) -> i16x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
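
For example, the wrap-around that distinguishes a rotate from a shift (a minimal sketch, assuming the packed_simd crate; the constants are illustrative):

use packed_simd::i16x8;

fn main() {
    let x = i16x8::splat(0x8001u16 as i16);
    let n = i16x8::splat(1);
    // The set MSB wraps around to bit 1; a plain << would discard it.
    assert_eq!(x.rotate_left(n), i16x8::splat(0x0003));
}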

impl Simd<[i16; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i16; 8]>[src]

pub fn wrapping_sum(self) -> i16[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result is the mathematical result modulo 2^n, where n is the bit width of the element type.

pub fn wrapping_product(self) -> i16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result is the mathematical result modulo 2^n, where n is the bit width of the element type.
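
For example (a minimal sketch, assuming the packed_simd crate; the overflow arithmetic is worked out in the comments):

use packed_simd::i16x8;

fn main() {
    let v = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
    assert_eq!(v.wrapping_sum(), 36);

    // Overflow wraps modulo 2^16 instead of panicking:
    let big = i16x8::splat(i16::MAX); // 8 * 32767 = 262136 ≡ -8 (mod 2^16)
    assert_eq!(big.wrapping_sum(), -8);
}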

impl Simd<[i16; 8]>[src]

pub fn max_element(self) -> i16[src]

Largest vector element value.

+

pub fn min_element(self) -> i16[src]

Smallest vector element value.

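For example (a minimal sketch, assuming the packed_simd crate):

use packed_simd::i16x8;

fn main() {
    let v = i16x8::new(3, -1, 4, 1, -5, 9, 2, 6);
    assert_eq!(v.max_element(), 9);
    assert_eq!(v.min_element(), -5);
}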

impl Simd<[i16; 8]>[src]

pub fn and(self) -> i16[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i16[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i16[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i16; 8]>[src]

pub fn from_slice_aligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i16; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

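For example, round-tripping a vector through a slice with the unaligned API, which only requires slice.len() >= Self::lanes() (a minimal sketch, assuming the packed_simd crate):

use packed_simd::i16x8;

fn main() {
    let data: [i16; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
    let v = i16x8::from_slice_unaligned(&data);

    let mut out = [0i16; 8];
    v.write_to_slice_unaligned(&mut out);
    assert_eq!(out, data);
}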

impl Simd<[i16; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.
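
For example, fixing a byte order for serialization regardless of the target (a minimal sketch, assuming the packed_simd crate):

use packed_simd::i16x8;

fn main() {
    let v = i16x8::splat(0x1234);
    let le = v.to_le();                // a no-op on little endian targets
    assert_eq!(i16x8::from_le(le), v); // round-trips on every target
}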

impl Simd<[i16; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.
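
For example (a minimal sketch, assuming the packed_simd crate):

use packed_simd::i16x8;

fn main() {
    let v = i16x8::splat(0b0110);
    assert_eq!(v.count_ones(), i16x8::splat(2));
    assert_eq!(v.leading_zeros(), i16x8::splat(13)); // 16-bit lanes
    assert_eq!(v.trailing_zeros(), i16x8::splat(1));
}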

impl Simd<[i16; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[i16; 8]>[src]

pub fn eq(self, other: Self) -> m16x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x8[src]

Lane-wise greater-than-or-equals comparison.

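For example, combining a lane-wise comparison with bitmask (a minimal sketch, assuming the packed_simd crate):

use packed_simd::i16x8;

fn main() {
    let a = i16x8::new(1, 2, 3, 4, 5, 6, 7, 8);
    let m = a.gt(i16x8::splat(4)); // m16x8: true on lanes 4..=7
    assert_eq!(m.bitmask(), 0b1111_0000);
}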

impl Simd<[i16; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i16x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[i16; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i16x8>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[i16; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[u16; 8]>[src]

pub const fn new(
    x0: u16,
    x1: u16,
    x2: u16,
    x3: u16,
    x4: u16,
    x5: u16,
    x6: u16,
    x7: u16
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u16) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> u16[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u16[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[u16; 8]>[src]

pub fn rotate_left(self, n: u16x8) -> u16x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u16x8) -> u16x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u16; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u16; 8]>[src]

pub fn wrapping_sum(self) -> u16[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result is the mathematical result modulo 2^n, where n is the bit width of the element type.

pub fn wrapping_product(self) -> u16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result is the mathematical result modulo 2^n, where n is the bit width of the element type.

impl Simd<[u16; 8]>[src]

pub fn max_element(self) -> u16[src]

Largest vector element value.

+

pub fn min_element(self) -> u16[src]

Smallest vector element value.

+

impl Simd<[u16; 8]>[src]

pub fn and(self) -> u16[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u16[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u16[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u16; 8]>[src]

pub fn from_slice_aligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u16; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u16; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u16; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u16; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[u16; 8]>[src]

pub fn eq(self, other: Self) -> m16x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[u16; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u16x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[u16; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u16x8>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[u16; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[m16; 8]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[m16; 8]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m16; 8]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl Simd<[m16; 8]>[src]

pub fn eq(self, other: Self) -> m16x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m16; 8]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m16; 8] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m16; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m16x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[m16; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m16x8>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[m16; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[m16; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[i32; 4]>[src]

pub const fn new(x0: i32, x1: i32, x2: i32, x3: i32) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i32) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

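For example, the non-destructive update style of this API (a minimal sketch, assuming the packed_simd crate):

use packed_simd::i32x4;

fn main() {
    let v = i32x4::new(10, 20, 30, 40);
    assert_eq!(i32x4::lanes(), 4);
    assert_eq!(v.extract(2), 30);
    let w = v.replace(2, 99); // returns a new vector; v is unchanged
    assert_eq!(w.extract(2), 99);
    assert_eq!(v.extract(2), 30);
}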

impl Simd<[i32; 4]>[src]

pub fn rotate_left(self, n: i32x4) -> i32x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i32x4) -> i32x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i32; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i32; 4]>[src]

pub fn wrapping_sum(self) -> i32[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result is the mathematical result modulo 2^n, where n is the bit width of the element type.

pub fn wrapping_product(self) -> i32[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result is the mathematical result modulo 2^n, where n is the bit width of the element type.

impl Simd<[i32; 4]>[src]

pub fn max_element(self) -> i32[src]

Largest vector element value.

+

pub fn min_element(self) -> i32[src]

Smallest vector element value.

+

impl Simd<[i32; 4]>[src]

pub fn and(self) -> i32[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i32[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i32[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i32; 4]>[src]

pub fn from_slice_aligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i32; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i32; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i32; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i32; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[i32; 4]>[src]

pub fn eq(self, other: Self) -> m32x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x4[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[i32; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i32x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[i32; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i32x4>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[i32; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[u32; 4]>[src]

pub const fn new(x0: u32, x1: u32, x2: u32, x3: u32) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u32) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> u32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[u32; 4]>[src]

pub fn rotate_left(self, n: u32x4) -> u32x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u32x4) -> u32x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u32; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u32; 4]>[src]

pub fn wrapping_sum(self) -> u32[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result is the mathematical result modulo 2^n, where n is the bit width of the element type.

pub fn wrapping_product(self) -> u32[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result is the mathematical result modulo 2^n, where n is the bit width of the element type.

impl Simd<[u32; 4]>[src]

pub fn max_element(self) -> u32[src]

Largest vector element value.

+

pub fn min_element(self) -> u32[src]

Smallest vector element value.

+

impl Simd<[u32; 4]>[src]

pub fn and(self) -> u32[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u32[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u32[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u32; 4]>[src]

pub fn from_slice_aligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u32; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u32; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u32; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u32; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[u32; 4]>[src]

pub fn eq(self, other: Self) -> m32x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x4[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[u32; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u32x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[u32; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u32x4>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[u32; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[f32; 4]>[src]

pub const fn new(x0: f32, x1: f32, x2: f32, x3: f32) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: f32) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> f32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> f32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[f32; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[f32; 4]>[src]

pub fn sum(self) -> f32[src]

Horizontal sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

pub fn product(self) -> f32[src]

Horizontal product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.
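
For example (a minimal sketch, assuming the packed_simd crate; the chosen values are exactly representable, so the reductions are exact):

use packed_simd::f32x4;

fn main() {
    let v = f32x4::new(1.0, 2.0, 3.0, 4.0);
    assert_eq!(v.sum(), 10.0);
    assert_eq!(v.product(), 24.0);
    // Any NaN lane makes the reduction NaN:
    assert!(v.replace(0, f32::NAN).sum().is_nan());
}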

impl Simd<[f32; 4]>[src]

pub fn max_element(self) -> f32[src]

Largest vector element value.

+

pub fn min_element(self) -> f32[src]

Smallest vector element value.

+

impl Simd<[f32; 4]>[src]

pub fn from_slice_aligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[f32; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[f32; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[f32; 4]>[src]

pub const EPSILON: f32x4[src]

Machine epsilon value.

+

pub const MIN: f32x4[src]

Smallest finite value.

+

pub const MIN_POSITIVE: f32x4[src]

Smallest positive normal value.

+

pub const MAX: f32x4[src]

Largest finite value.

+

pub const NAN: f32x4[src]

Not a Number (NaN).

+

pub const INFINITY: f32x4[src]

Infinity (∞).

+

pub const NEG_INFINITY: f32x4[src]

Negative infinity (-∞).

+

pub const PI: f32x4[src]

Archimedes' constant (π)

+

pub const FRAC_PI_2: f32x4[src]

π/2

+

pub const FRAC_PI_3: f32x4[src]

π/3

+

pub const FRAC_PI_4: f32x4[src]

π/4

+

pub const FRAC_PI_6: f32x4[src]

π/6

+

pub const FRAC_PI_8: f32x4[src]

π/8

+

pub const FRAC_1_PI: f32x4[src]

1/π

+

pub const FRAC_2_PI: f32x4[src]

2/π

+

pub const FRAC_2_SQRT_PI: f32x4[src]

2/sqrt(π)

+

pub const SQRT_2: f32x4[src]

sqrt(2)

+

pub const FRAC_1_SQRT_2: f32x4[src]

1/sqrt(2)

+

pub const E: f32x4[src]

Euler's number (e)

+

pub const LOG2_E: f32x4[src]

log2(e)

+

pub const LOG10_E: f32x4[src]

log10(e)

+

pub const LN_2: f32x4[src]

ln(2)

+

pub const LN_10: f32x4[src]

ln(10)

+

impl Simd<[f32; 4]>[src]

pub fn is_nan(self) -> m32x4[src]

pub fn is_infinite(self) -> m32x4[src]

pub fn is_finite(self) -> m32x4[src]

impl Simd<[f32; 4]>[src]

pub fn abs(self) -> Self[src]

Absolute value.

+

impl Simd<[f32; 4]>[src]

pub fn cos(self) -> Self[src]

Cosine.

+

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

+

impl Simd<[f32; 4]>[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

+

impl Simd<[f32; 4]>[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

+

impl Simd<[f32; 4]>[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

+

impl Simd<[f32; 4]>[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

+

While fused multiply-add (fma) computes with infinite intermediate precision, mul_adde has at worst the same precision as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.

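For example (a minimal sketch, assuming the packed_simd crate; the tolerance is illustrative since the estimate's precision is unspecified):

use packed_simd::f32x4;

fn main() {
    let x = f32x4::splat(2.0);
    let y = f32x4::splat(3.0);
    let z = f32x4::splat(1.0);
    assert_eq!(x.mul_add(y, z), f32x4::splat(7.0));
    // The estimate trades the fma precision guarantee for speed:
    let e = x.mul_adde(y, z);
    assert!((e - f32x4::splat(7.0)).abs().max_element() < 1e-3);
}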

impl Simd<[f32; 4]>[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating point power of x.

+

impl Simd<[f32; 4]>[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl Simd<[f32; 4]>[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

+

FIXME: The precision of the estimate is currently unspecified.

+

impl Simd<[f32; 4]>[src]

pub fn sin(self) -> Self[src]

Sine.

+

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

+

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

+

impl Simd<[f32; 4]>[src]

pub fn sqrt(self) -> Self[src]

impl Simd<[f32; 4]>[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl Simd<[f32; 4]>[src]

pub fn tanh(self) -> Self[src]

Tanh.

+

impl Simd<[f32; 4]>[src]

pub fn eq(self, other: Self) -> m32x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x4[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m32; 4]>[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[m32; 4]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m32; 4]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl Simd<[m32; 4]>[src]

pub fn eq(self, other: Self) -> m32x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x4[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m32; 4]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m32; 4] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m32; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m32x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[m32; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m32x4>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[m32; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[m32; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[i64; 2]>[src]

pub const fn new(x0: i64, x1: i64) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i64) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i64[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i64[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[i64; 2]>[src]

pub fn rotate_left(self, n: i64x2) -> i64x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i64x2) -> i64x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i64; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i64; 2]>[src]

pub fn wrapping_sum(self) -> i64[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result is the mathematical result modulo 2^n, where n is the bit width of the element type.

pub fn wrapping_product(self) -> i64[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result is the mathematical result modulo 2^n, where n is the bit width of the element type.

impl Simd<[i64; 2]>[src]

pub fn max_element(self) -> i64[src]

Largest vector element value.

+

pub fn min_element(self) -> i64[src]

Smallest vector element value.

+

impl Simd<[i64; 2]>[src]

pub fn and(self) -> i64[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i64[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i64[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i64; 2]>[src]

pub fn from_slice_aligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i64; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.
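
The store direction mirrors the loads above; a sketch (assuming the
packed_simd crate):

use packed_simd::i64x2;

fn main() {
    let v = i64x2::new(7, 9);
    let mut out = [0i64; 2];
    v.write_to_slice_unaligned(&mut out);
    assert_eq!(out, [7, 9]);
}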

impl Simd<[i64; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.


pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.
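
A sketch of the endianness helpers (assuming the packed_simd crate);
they behave lane-wise like the scalar i64::to_le and friends:

use packed_simd::i64x2;

fn main() {
    let v = i64x2::new(0x0102_0304, 0);
    // A no-op on little endian targets, a byte swap on big endian ones.
    assert_eq!(v.to_le().extract(0), 0x0102_0304i64.to_le());
}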

impl Simd<[i64; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.
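
Each of these bit-count operations returns a vector, with lane i holding
the count for lane i of the input; a sketch (assuming the packed_simd
crate):

use packed_simd::i64x2;

fn main() {
    let v = i64x2::new(0b1011, 0);
    assert_eq!(v.count_ones(), i64x2::new(3, 0));
    assert_eq!(v.leading_zeros(), i64x2::new(60, 64));
}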

impl Simd<[i64; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.


impl Simd<[i64; 2]>[src]

pub fn eq(self, other: Self) -> m64x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i64; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i64x2>[src]

Returns a wrapper that implements PartialOrd.


impl Simd<[i64; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i64x2>[src]

Returns a wrapper that implements Ord.


impl Simd<[i64; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.
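
Lane 0 maps to the least significant bit (the lane order is an
assumption here); a sketch (assuming the packed_simd crate):

use packed_simd::i64x2;

fn main() {
    // The MSB (sign bit) is set only in lane 0.
    let v = i64x2::new(-1, 1);
    assert_eq!(v.bitmask(), 0b01);
}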

impl Simd<[u64; 2]>[src]

pub const fn new(x0: u64, x1: u64) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u64) -> Self[src]

Constructs a new instance with each element initialized to value.
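
A sketch of the constructors (assuming the packed_simd crate):

use packed_simd::u64x2;

fn main() {
    assert_eq!(u64x2::lanes(), 2);
    assert_eq!(u64x2::splat(5), u64x2::new(5, 5));
}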

pub fn extract(self, index: usize) -> u64[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u64[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

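Since replace returns the new vector rather than mutating in place
(hence the must_use attributes above), usage looks like this sketch
(assuming the packed_simd crate):

use packed_simd::u64x2;

fn main() {
    let v = u64x2::new(10, 20);
    let w = v.replace(1, 99);
    assert_eq!(v.extract(1), 20); // the original is unchanged
    assert_eq!(w, u64x2::new(10, 99));
}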

impl Simd<[u64; 2]>[src]

pub fn rotate_left(self, n: u64x2) -> u64x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u64x2) -> u64x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
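
Unlike the shift operators, no bits are discarded; a sketch (assuming
the packed_simd crate):

use packed_simd::u64x2;

fn main() {
    let x = u64x2::new(0b1000, u64::MAX);
    let n = u64x2::new(61, 1);
    // Each lane rotates by the count in the matching lane of n,
    // so lane 0 behaves like the scalar u64::rotate_left.
    assert_eq!(x.rotate_left(n).extract(0), 0b1000u64.rotate_left(61));
    // Rotating back by the same counts round-trips.
    assert_eq!(x.rotate_left(n).rotate_right(n), x);
}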

impl Simd<[u64; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u64; 2]>[src]

pub fn wrapping_sum(self) -> u64[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements.
That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result
modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> u64[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements.
That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result
modulo 2^n where n is the number of times it overflows.

impl Simd<[u64; 2]>[src]

pub fn max_element(self) -> u64[src]

Largest vector element value.

+

pub fn min_element(self) -> u64[src]

Smallest vector element value.

+

impl Simd<[u64; 2]>[src]

pub fn and(self) -> u64[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> u64[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> u64[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl Simd<[u64; 2]>[src]

pub fn from_slice_aligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u64; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u64; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl Simd<[u64; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl Simd<[u64; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[u64; 2]>[src]

pub fn eq(self, other: Self) -> m64x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m64x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m64x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m64x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m64x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m64x2[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[u64; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u64x2>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[u64; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u64x2>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[u64; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[f64; 2]>[src]

pub const fn new(x0: f64, x1: f64) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: f64) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> f64[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> f64[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[f64; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[f64; 2]>[src]

pub fn sum(self) -> f64[src]

Horizontal sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements.
That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.
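
A sketch of both behaviors (assuming the packed_simd crate):

use packed_simd::f64x2;

fn main() {
    assert_eq!(f64x2::new(1.5, 2.5).sum(), 4.0);
    // Any NaN lane poisons the reduction.
    assert!(f64x2::new(1.0, f64::NAN).sum().is_nan());
}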

pub fn product(self) -> f64[src]

Horizontal product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements.
That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

impl Simd<[f64; 2]>[src]

pub fn max_element(self) -> f64[src]

Largest vector element value.

+

pub fn min_element(self) -> f64[src]

Smallest vector element value.

+

impl Simd<[f64; 2]>[src]

pub fn from_slice_aligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[f64; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[f64; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[f64; 2]>[src]

pub const EPSILON: f64x2[src]

Machine epsilon value.

+

pub const MIN: f64x2[src]

Smallest finite value.

+

pub const MIN_POSITIVE: f64x2[src]

Smallest positive normal value.

+

pub const MAX: f64x2[src]

Largest finite value.

+

pub const NAN: f64x2[src]

Not a Number (NaN).

+

pub const INFINITY: f64x2[src]

Infinity (∞).

+

pub const NEG_INFINITY: f64x2[src]

Negative infinity (-∞).

+

pub const PI: f64x2[src]

Archimedes' constant (π)

+

pub const FRAC_PI_2: f64x2[src]

π/2

+

pub const FRAC_PI_3: f64x2[src]

π/3

+

pub const FRAC_PI_4: f64x2[src]

π/4

+

pub const FRAC_PI_6: f64x2[src]

π/6

+

pub const FRAC_PI_8: f64x2[src]

π/8

+

pub const FRAC_1_PI: f64x2[src]

1/π

+

pub const FRAC_2_PI: f64x2[src]

2/π

+

pub const FRAC_2_SQRT_PI: f64x2[src]

2/sqrt(π)

+

pub const SQRT_2: f64x2[src]

sqrt(2)

+

pub const FRAC_1_SQRT_2: f64x2[src]

1/sqrt(2)

+

pub const E: f64x2[src]

Euler's number (e)

+

pub const LOG2_E: f64x2[src]

log2(e)

+

pub const LOG10_E: f64x2[src]

log10(e)

+

pub const LN_2: f64x2[src]

ln(2)

+

pub const LN_10: f64x2[src]

ln(10)

+

impl Simd<[f64; 2]>[src]

pub fn is_nan(self) -> m64x2[src]

pub fn is_infinite(self) -> m64x2[src]

pub fn is_finite(self) -> m64x2[src]

impl Simd<[f64; 2]>[src]

pub fn abs(self) -> Self[src]

Absolute value.

+

impl Simd<[f64; 2]>[src]

pub fn cos(self) -> Self[src]

Cosine.

+

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

+

impl Simd<[f64; 2]>[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

+

impl Simd<[f64; 2]>[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

+

impl Simd<[f64; 2]>[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

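Fused multiply-add rounds once rather than twice, so it can differ from
self * y + z in the last bit; a sketch with exactly representable values
(assuming the packed_simd crate):

use packed_simd::f64x2;

fn main() {
    let (x, y, z) = (f64x2::splat(2.0), f64x2::splat(3.0), f64x2::splat(1.0));
    assert_eq!(x.mul_add(y, z), f64x2::splat(7.0));
}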

impl Simd<[f64; 2]>[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

+

While fused multiply-add (fma) has infinite precision, mul_adde has at worst the same precision as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.

impl Simd<[f64; 2]>[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating point power of x.

impl Simd<[f64; 2]>[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl Simd<[f64; 2]>[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

+

FIXME: The precision of the estimate is currently unspecified.

+

impl Simd<[f64; 2]>[src]

pub fn sin(self) -> Self[src]

Sine.

+

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

+

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

+

impl Simd<[f64; 2]>[src]

pub fn sqrt(self) -> Self[src]

impl Simd<[f64; 2]>[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl Simd<[f64; 2]>[src]

pub fn tanh(self) -> Self[src]

Tanh.

+

impl Simd<[f64; 2]>[src]

pub fn eq(self, other: Self) -> m64x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m64x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m64x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m64x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m64x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m64x2[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m64; 2]>[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Precondition

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[m64; 2]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl Simd<[m64; 2]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

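A sketch of the three boolean reductions (assuming the packed_simd
crate):

use packed_simd::m64x2;

fn main() {
    let m = m64x2::new(true, false);
    assert!(m.any());
    assert!(!m.all());
    assert!(!m.none());
}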

impl Simd<[m64; 2]>[src]

pub fn eq(self, other: Self) -> m64x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m64x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m64x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m64x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m64x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m64x2[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m64; 2]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m64; 2] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.
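
Combining a lane-wise comparison with select gives branch-free blending;
a sketch computing a lane-wise maximum (assuming the packed_simd crate):

use packed_simd::f64x2;

fn main() {
    let a = f64x2::new(1.0, 4.0);
    let b = f64x2::new(3.0, 2.0);
    let m = a.gt(b); // m64x2: [false, true]
    assert_eq!(m.select(a, b), f64x2::new(3.0, 4.0));
}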

impl Simd<[m64; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m64x2>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[m64; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m64x2>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[m64; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[m64; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[i128; 1]>[src]

pub const fn new(x0: i128) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i128) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i128[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i128[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[i128; 1]>[src]

pub fn rotate_left(self, n: i128x1) -> i128x1[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: i128x1) -> i128x1[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i128; 1]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i128; 1]>[src]

pub fn wrapping_sum(self) -> i128[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements.
That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result
modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> i128[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements.
That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result
modulo 2^n where n is the number of times it overflows.

impl Simd<[i128; 1]>[src]

pub fn max_element(self) -> i128[src]

Largest vector element value.

+

pub fn min_element(self) -> i128[src]

Smallest vector element value.

+

impl Simd<[i128; 1]>[src]

pub fn and(self) -> i128[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> i128[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> i128[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl Simd<[i128; 1]>[src]

pub fn from_slice_aligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i128; 1]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i128; 1]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl Simd<[i128; 1]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl Simd<[i128; 1]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[i128; 1]>[src]

pub fn eq(self, other: Self) -> m128x1[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m128x1[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m128x1[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m128x1[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m128x1[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m128x1[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[i128; 1]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i128x1>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[i128; 1]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i128x1>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[i128; 1]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[u128; 1]>[src]

pub const fn new(x0: u128) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u128) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u128[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u128[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[u128; 1]>[src]

pub fn rotate_left(self, n: u128x1) -> u128x1[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: u128x1) -> u128x1[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u128; 1]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u128; 1]>[src]

pub fn wrapping_sum(self) -> u128[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements.
That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result
modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> u128[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements.
That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result
modulo 2^n where n is the number of times it overflows.

impl Simd<[u128; 1]>[src]

pub fn max_element(self) -> u128[src]

Largest vector element value.

+

pub fn min_element(self) -> u128[src]

Smallest vector element value.

+

impl Simd<[u128; 1]>[src]

pub fn and(self) -> u128[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> u128[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> u128[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl Simd<[u128; 1]>[src]

pub fn from_slice_aligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u128; 1]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u128; 1]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl Simd<[u128; 1]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl Simd<[u128; 1]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[u128; 1]>[src]

pub fn eq(self, other: Self) -> m128x1[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m128x1[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m128x1[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m128x1[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m128x1[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m128x1[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[u128; 1]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u128x1>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[u128; 1]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u128x1>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[u128; 1]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[m128; 1]>[src]

pub const fn new(x0: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Precondition

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[m128; 1]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl Simd<[m128; 1]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl Simd<[m128; 1]>[src]

pub fn eq(self, other: Self) -> m128x1[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m128x1[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m128x1[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m128x1[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m128x1[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m128x1[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m128; 1]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m128; 1] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m128; 1]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m128x1>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[m128; 1]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m128x1>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[m128; 1]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[m128; 1]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[i8; 32]>[src]

pub const fn new(
    x0: i8,
    x1: i8,
    x2: i8,
    x3: i8,
    x4: i8,
    x5: i8,
    x6: i8,
    x7: i8,
    x8: i8,
    x9: i8,
    x10: i8,
    x11: i8,
    x12: i8,
    x13: i8,
    x14: i8,
    x15: i8,
    x16: i8,
    x17: i8,
    x18: i8,
    x19: i8,
    x20: i8,
    x21: i8,
    x22: i8,
    x23: i8,
    x24: i8,
    x25: i8,
    x26: i8,
    x27: i8,
    x28: i8,
    x29: i8,
    x30: i8,
    x31: i8
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[i8; 32]>[src]

pub fn rotate_left(self, n: i8x32) -> i8x32[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: i8x32) -> i8x32[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i8; 32]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i8; 32]>[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements.
That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result
modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements.
That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result
modulo 2^n where n is the number of times it overflows.

impl Simd<[i8; 32]>[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

+

pub fn min_element(self) -> i8[src]

Smallest vector element value.

+

impl Simd<[i8; 32]>[src]

pub fn and(self) -> i8[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> i8[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> i8[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl Simd<[i8; 32]>[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i8; 32]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i8; 32]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl Simd<[i8; 32]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl Simd<[i8; 32]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[i8; 32]>[src]

pub fn eq(self, other: Self) -> m8x32[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m8x32[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m8x32[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m8x32[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m8x32[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m8x32[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[i8; 32]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x32>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[i8; 32]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x32>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[i8; 32]>[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 32 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[u8; 32]>[src]

pub const fn new(
    x0: u8,
    x1: u8,
    x2: u8,
    x3: u8,
    x4: u8,
    x5: u8,
    x6: u8,
    x7: u8,
    x8: u8,
    x9: u8,
    x10: u8,
    x11: u8,
    x12: u8,
    x13: u8,
    x14: u8,
    x15: u8,
    x16: u8,
    x17: u8,
    x18: u8,
    x19: u8,
    x20: u8,
    x21: u8,
    x22: u8,
    x23: u8,
    x24: u8,
    x25: u8,
    x26: u8,
    x27: u8,
    x28: u8,
    x29: u8,
    x30: u8,
    x31: u8
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[u8; 32]>[src]

pub fn rotate_left(self, n: u8x32) -> u8x32[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: u8x32) -> u8x32[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u8; 32]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u8; 32]>[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements.
That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result
modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements.
That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result
modulo 2^n where n is the number of times it overflows.

impl Simd<[u8; 32]>[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

+

pub fn min_element(self) -> u8[src]

Smallest vector element value.

+

impl Simd<[u8; 32]>[src]

pub fn and(self) -> u8[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> u8[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> u8[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl Simd<[u8; 32]>[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u8; 32]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
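
A round-trip sketch pairing the slice writers with the slice constructors, assuming the packed_simd types above:

use packed_simd::u8x32;

fn main() {
    let v = u8x32::splat(42);
    let mut out = [0u8; 32];
    v.write_to_slice_unaligned(&mut out);
    assert_eq!(u8x32::from_slice_unaligned(&out), v);
}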

impl Simd<[u8; 32]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl Simd<[u8; 32]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+
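
A lane-wise bit-count sketch, assuming packed_simd is in scope; note these return a vector, one count per lane:

use packed_simd::u8x32;

fn main() {
    let v = u8x32::splat(0b0000_1100);
    assert_eq!(v.count_ones(), u8x32::splat(2));
    assert_eq!(v.trailing_zeros(), u8x32::splat(2));
    assert_eq!(v.leading_zeros(), u8x32::splat(4));
}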

impl Simd<[u8; 32]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+
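
A sketch of a dynamic shuffle, assuming (per the Shuffle1Dyn bound above) that u8x32 accepts a u8x32 index vector:

use packed_simd::u8x32;

fn main() {
    let mut v = u8x32::splat(0);
    for i in 0..32 {
        v = v.replace(i, i as u8); // v = [0, 1, 2, ..., 31]
    }
    // An all-zero index vector broadcasts lane 0 into every lane.
    assert_eq!(v.shuffle1_dyn(u8x32::splat(0)), u8x32::splat(0));
}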

impl Simd<[u8; 32]>[src]

pub fn eq(self, other: Self) -> m8x32[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m8x32[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m8x32[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m8x32[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m8x32[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m8x32[src]

Lane-wise greater-than-or-equals comparison.

+
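
A comparison sketch, assuming the packed_simd types above; each comparison yields an m8x32 mask with one boolean lane per input lane:

use packed_simd::u8x32;

fn main() {
    let a = u8x32::splat(1);
    let b = u8x32::splat(2);
    assert!(a.lt(b).all());
    assert!(a.gt(b).none());
    assert!(a.eq(a).all());
}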

impl Simd<[u8; 32]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x32>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[u8; 32]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x32>[src]

Returns a wrapper that implements Ord.

+
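
A sketch of ordering through the wrappers, assuming LexicographicallyOrdered compares lane by lane like the corresponding [u8; 32] arrays would:

use packed_simd::u8x32;

fn main() {
    let a = u8x32::splat(1);
    let b = u8x32::splat(1).replace(31, 2);
    assert!(a.lex_ord() < b.lex_ord());
    assert!(a.partial_lex_ord() <= b.partial_lex_ord());
}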

impl Simd<[u8; 32]>[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+
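
A bitmask sketch, assuming the packed_simd types above; bit i of the result mirrors the most significant bit of lane i:

use packed_simd::u8x32;

fn main() {
    // Only lane 0 has its most significant bit set, so only bit 0 is set.
    let v = u8x32::splat(0).replace(0, 0x80);
    assert_eq!(v.bitmask(), 1u32);
}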

impl Simd<[m8; 32]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool,
    x16: bool,
    x17: bool,
    x18: bool,
    x19: bool,
    x20: bool,
    x21: bool,
    x22: bool,
    x23: bool,
    x24: bool,
    x25: bool,
    x26: bool,
    x27: bool,
    x28: bool,
    x29: bool,
    x30: bool,
    x31: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[m8; 32]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[m8; 32]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+
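
A sketch of the boolean reductions on a mask, assuming the packed_simd types above:

use packed_simd::m8x32;

fn main() {
    let m = m8x32::splat(false).replace(5, true);
    assert!(m.any());   // at least one lane is true
    assert!(!m.all());  // not every lane is true
    assert!(!m.none()); // none() is the negation of any()
}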

impl Simd<[m8; 32]>[src]

pub fn eq(self, other: Self) -> m8x32[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m8x32[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m8x32[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m8x32[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m8x32[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m8x32[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m8; 32]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 32] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+
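
A select sketch, assuming the packed_simd types above; this is the vector analogue of a lane-wise if/else:

use packed_simd::{m8x32, u8x32};

fn main() {
    let mask = m8x32::splat(false).replace(0, true);
    let a = u8x32::splat(1);
    let b = u8x32::splat(2);
    let r = mask.select(a, b);
    assert_eq!(r.extract(0), 1); // true lane takes its value from `a`
    assert_eq!(r.extract(1), 2); // false lanes take their values from `b`
}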

impl Simd<[m8; 32]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x32>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[m8; 32]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x32>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[m8; 32]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[m8; 32]>[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

impl Simd<[i16; 16]>[src]

pub const fn new(
    x0: i16,
    x1: i16,
    x2: i16,
    x3: i16,
    x4: i16,
    x5: i16,
    x6: i16,
    x7: i16,
    x8: i16,
    x9: i16,
    x10: i16,
    x11: i16,
    x12: i16,
    x13: i16,
    x14: i16,
    x15: i16
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i16) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i16[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i16[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[i16; 16]>[src]

pub fn rotate_left(self, n: i16x16) -> i16x16[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: i16x16) -> i16x16[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl Simd<[i16; 16]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl Simd<[i16; 16]>[src]

pub fn wrapping_sum(self) -> i16[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

+

pub fn wrapping_product(self) -> i16[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

+

impl Simd<[i16; 16]>[src]

pub fn max_element(self) -> i16[src]

Largest vector element value.

+

pub fn min_element(self) -> i16[src]

Smallest vector element value.

+

impl Simd<[i16; 16]>[src]

pub fn and(self) -> i16[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> i16[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> i16[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[i16; 16]>[src]

pub fn from_slice_aligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i16; 16]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i16; 16]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl Simd<[i16; 16]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl Simd<[i16; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[i16; 16]>[src]

pub fn eq(self, other: Self) -> m16x16[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x16[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x16[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x16[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x16[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x16[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[i16; 16]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i16x16>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[i16; 16]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i16x16>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[i16; 16]>[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

impl Simd<[u16; 16]>[src]

pub const fn new(
    x0: u16,
    x1: u16,
    x2: u16,
    x3: u16,
    x4: u16,
    x5: u16,
    x6: u16,
    x7: u16,
    x8: u16,
    x9: u16,
    x10: u16,
    x11: u16,
    x12: u16,
    x13: u16,
    x14: u16,
    x15: u16
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u16) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> u16[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u16[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[u16; 16]>[src]

pub fn rotate_left(self, n: u16x16) -> u16x16[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: u16x16) -> u16x16[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl Simd<[u16; 16]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl Simd<[u16; 16]>[src]

pub fn wrapping_sum(self) -> u16[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

+

pub fn wrapping_product(self) -> u16[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

+

impl Simd<[u16; 16]>[src]

pub fn max_element(self) -> u16[src]

Largest vector element value.

+

pub fn min_element(self) -> u16[src]

Smallest vector element value.

+

impl Simd<[u16; 16]>[src]

pub fn and(self) -> u16[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> u16[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> u16[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[u16; 16]>[src]

pub fn from_slice_aligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u16; 16]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u16; 16]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl Simd<[u16; 16]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl Simd<[u16; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[u16; 16]>[src]

pub fn eq(self, other: Self) -> m16x16[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x16[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x16[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x16[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x16[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x16[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[u16; 16]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u16x16>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[u16; 16]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u16x16>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[u16; 16]>[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

impl Simd<[m16; 16]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[m16; 16]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[m16; 16]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl Simd<[m16; 16]>[src]

pub fn eq(self, other: Self) -> m16x16[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x16[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x16[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x16[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x16[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x16[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m16; 16]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m16; 16] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+

impl Simd<[m16; 16]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m16x16>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[m16; 16]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m16x16>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[m16; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[m16; 16]>[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

impl Simd<[i32; 8]>[src]

pub const fn new(
    x0: i32,
    x1: i32,
    x2: i32,
    x3: i32,
    x4: i32,
    x5: i32,
    x6: i32,
    x7: i32
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i32) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[i32; 8]>[src]

pub fn rotate_left(self, n: i32x8) -> i32x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: i32x8) -> i32x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl Simd<[i32; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl Simd<[i32; 8]>[src]

pub fn wrapping_sum(self) -> i32[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

+

pub fn wrapping_product(self) -> i32[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

+

impl Simd<[i32; 8]>[src]

pub fn max_element(self) -> i32[src]

Largest vector element value.

+

pub fn min_element(self) -> i32[src]

Smallest vector element value.

+

impl Simd<[i32; 8]>[src]

pub fn and(self) -> i32[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> i32[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> i32[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[i32; 8]>[src]

pub fn from_slice_aligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i32; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i32; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+
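
An endianness sketch, assuming these are the packed_simd types and that swap_bytes reverses the byte order within each lane (as its use in to_le/to_be suggests):

use packed_simd::i32x8;

fn main() {
    let v = i32x8::splat(0x0102_0304);
    assert_eq!(v.swap_bytes(), i32x8::splat(0x0403_0201));
    // On a little-endian target, to_le is the identity.
    #[cfg(target_endian = "little")]
    assert_eq!(v.to_le(), v);
}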

impl Simd<[i32; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl Simd<[i32; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[i32; 8]>[src]

pub fn eq(self, other: Self) -> m32x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[i32; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i32x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[i32; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i32x8>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[i32; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

impl Simd<[u32; 8]>[src]

pub const fn new(
    x0: u32,
    x1: u32,
    x2: u32,
    x3: u32,
    x4: u32,
    x5: u32,
    x6: u32,
    x7: u32
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u32) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> u32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[u32; 8]>[src]

pub fn rotate_left(self, n: u32x8) -> u32x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: u32x8) -> u32x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl Simd<[u32; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl Simd<[u32; 8]>[src]

pub fn wrapping_sum(self) -> u32[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

+

pub fn wrapping_product(self) -> u32[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

+

impl Simd<[u32; 8]>[src]

pub fn max_element(self) -> u32[src]

Largest vector element value.

+

pub fn min_element(self) -> u32[src]

Smallest vector element value.

+

impl Simd<[u32; 8]>[src]

pub fn and(self) -> u32[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> u32[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> u32[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[u32; 8]>[src]

pub fn from_slice_aligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u32; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u32; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl Simd<[u32; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl Simd<[u32; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[u32; 8]>[src]

pub fn eq(self, other: Self) -> m32x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[u32; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u32x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[u32; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u32x8>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[u32; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

impl Simd<[f32; 8]>[src]

pub const fn new(
    x0: f32,
    x1: f32,
    x2: f32,
    x3: f32,
    x4: f32,
    x5: f32,
    x6: f32,
    x7: f32
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: f32) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> f32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> f32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[f32; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl Simd<[f32; 8]>[src]

pub fn sum(self) -> f32[src]

Horizontal sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

+

pub fn product(self) -> f32[src]

Horizontal product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

+
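
A floating-point reduction sketch, assuming the packed_simd types above; the values are chosen so the tree-reduction is exact:

use packed_simd::f32x8;

fn main() {
    let v = f32x8::splat(0.5);
    assert_eq!(v.sum(), 4.0);
    // A single NaN lane makes the whole reduction NaN.
    assert!(v.replace(3, f32::NAN).sum().is_nan());
}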

impl Simd<[f32; 8]>[src]

pub fn max_element(self) -> f32[src]

Largest vector element value.

+

pub fn min_element(self) -> f32[src]

Smallest vector element value.

+

impl Simd<[f32; 8]>[src]

pub fn from_slice_aligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[f32; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[f32; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[f32; 8]>[src]

pub const EPSILON: f32x8[src]

Machine epsilon value.

+

pub const MIN: f32x8[src]

Smallest finite value.

+

pub const MIN_POSITIVE: f32x8[src]

Smallest positive normal value.

+

pub const MAX: f32x8[src]

Largest finite value.

+

pub const NAN: f32x8[src]

Not a Number (NaN).

+

pub const INFINITY: f32x8[src]

Infinity (∞).

+

pub const NEG_INFINITY: f32x8[src]

Negative infinity (-∞).

+

pub const PI: f32x8[src]

Archimedes' constant (π)

+

pub const FRAC_PI_2: f32x8[src]

π/2

+

pub const FRAC_PI_3: f32x8[src]

π/3

+

pub const FRAC_PI_4: f32x8[src]

π/4

+

pub const FRAC_PI_6: f32x8[src]

π/6

+

pub const FRAC_PI_8: f32x8[src]

π/8

+

pub const FRAC_1_PI: f32x8[src]

1/π

+

pub const FRAC_2_PI: f32x8[src]

2/π

+

pub const FRAC_2_SQRT_PI: f32x8[src]

2/sqrt(π)

+

pub const SQRT_2: f32x8[src]

sqrt(2)

+

pub const FRAC_1_SQRT_2: f32x8[src]

1/sqrt(2)

+

pub const E: f32x8[src]

Euler's number (e)

+

pub const LOG2_E: f32x8[src]

log2(e)

+

pub const LOG10_E: f32x8[src]

log10(e)

+

pub const LN_2: f32x8[src]

ln(2)

+

pub const LN_10: f32x8[src]

ln(10)

+

impl Simd<[f32; 8]>[src]

pub fn is_nan(self) -> m32x8[src]

pub fn is_infinite(self) -> m32x8[src]

pub fn is_finite(self) -> m32x8[src]

impl Simd<[f32; 8]>[src]

pub fn abs(self) -> Self[src]

Absolute value.

+

impl Simd<[f32; 8]>[src]

pub fn cos(self) -> Self[src]

Cosine.

+

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

+

impl Simd<[f32; 8]>[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

+

impl Simd<[f32; 8]>[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

+

impl Simd<[f32; 8]>[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

+
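
A fused multiply-add sketch, assuming the packed_simd types above; the operation is lane-wise x * y + z with a single rounding where the target supports fma:

use packed_simd::f32x8;

fn main() {
    let x = f32x8::splat(2.0);
    let y = f32x8::splat(3.0);
    let z = f32x8::splat(1.0);
    assert_eq!(x.mul_add(y, z), f32x8::splat(7.0));
}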

impl Simd<[f32; 8]>[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

+

While fused multiply-add (fma) has infinite intermediate precision, mul_adde has at worst the same precision as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.

+

impl Simd<[f32; 8]>[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating-point power of x.

+

impl Simd<[f32; 8]>[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl Simd<[f32; 8]>[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

+

FIXME: The precision of the estimate is currently unspecified.

+

impl Simd<[f32; 8]>[src]

pub fn sin(self) -> Self[src]

Sine.

+

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

+

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

+

impl Simd<[f32; 8]>[src]

pub fn sqrt(self) -> Self[src]

impl Simd<[f32; 8]>[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl Simd<[f32; 8]>[src]

pub fn tanh(self) -> Self[src]

Tanh.

+

impl Simd<[f32; 8]>[src]

pub fn eq(self, other: Self) -> m32x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m32; 8]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[m32; 8]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[m32; 8]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl Simd<[m32; 8]>[src]

pub fn eq(self, other: Self) -> m32x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m32; 8]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m32; 8] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+

impl Simd<[m32; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m32x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[m32; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m32x8>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[m32; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[m32; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

impl Simd<[i64; 4]>[src]

pub const fn new(x0: i64, x1: i64, x2: i64, x3: i64) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i64) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i64[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i64[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = + "replace_unchecked does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[i64; 4]>[src]

pub fn rotate_left(self, n: i64x4) -> i64x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i64x4) -> i64x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
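A sketch separating rotation from shifting (assuming the packed_simd crate):

use packed_simd::i64x4;

let x = i64x4::splat(1);
let n = i64x4::splat(1);
// Unlike `<<`, the bits shifted out wrap around to the other end.
assert_eq!(x.rotate_left(n), i64x4::splat(2));
assert_eq!(x.rotate_right(n), i64x4::splat(i64::MIN)); // bit 0 wraps to bit 63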

impl Simd<[i64; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i64; 4]>[src]

pub fn wrapping_sum(self) -> i64[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

pub fn wrapping_product(self) -> i64[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.
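A reduction sketch (assuming the packed_simd crate):

use packed_simd::i64x4;

let x = i64x4::new(1, 2, 3, 4);
assert_eq!(x.wrapping_sum(), 10);
assert_eq!(x.wrapping_product(), 24);
// Overflow wraps modulo 2^64 for i64 lanes:
assert_eq!(i64x4::splat(i64::MAX).wrapping_sum(), -4);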

impl Simd<[i64; 4]>[src]

pub fn max_element(self) -> i64[src]

Largest vector element value.

pub fn min_element(self) -> i64[src]

Smallest vector element value.

impl Simd<[i64; 4]>[src]

pub fn and(self) -> i64[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i64[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i64[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i64; 4]>[src]

pub fn from_slice_aligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i64; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.
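A slice round-trip sketch (assuming the packed_simd crate); the unaligned variants only require that the slice be long enough:

use packed_simd::i64x4;

let data = [1i64, 2, 3, 4, 5];
let v = i64x4::from_slice_unaligned(&data[1..]); // reads 2, 3, 4, 5
let mut out = [0i64; 4];
v.write_to_slice_unaligned(&mut out);
assert_eq!(out, [2, 3, 4, 5]);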

impl Simd<[i64; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.
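An endianness sketch (assuming the packed_simd crate; the asserts as written hold on a little-endian target):

use packed_simd::i64x4;

let x = i64x4::splat(1);
assert_eq!(x.to_le(), x);              // no-op on little endian
assert_eq!(x.to_be(), x.swap_bytes()); // byte swap on little endian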

impl Simd<[i64; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.
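A bit-counting sketch (assuming the packed_simd crate); each lane is counted independently and the counts come back as a vector:

use packed_simd::i64x4;

let x = i64x4::new(0, 1, 0b1010, -1);
assert_eq!(x.count_ones(), i64x4::new(0, 1, 2, 64));
assert_eq!(x.trailing_zeros(), i64x4::new(64, 0, 1, 0));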

impl Simd<[i64; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[i64; 4]>[src]

pub fn eq(self, other: Self) -> m64x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x4[src]

Lane-wise greater-than-or-equals comparison.
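A comparison sketch (assuming the packed_simd crate); note that the result is a mask vector, not a single bool:

use packed_simd::{i64x4, m64x4};

let a = i64x4::new(1, 2, 3, 4);
let b = i64x4::new(4, 3, 2, 1);
assert_eq!(a.lt(b), m64x4::new(true, true, false, false));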

impl Simd<[i64; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i64x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i64; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i64x4>[src]

Returns a wrapper that implements Ord.
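A lexicographic-ordering sketch (assuming the packed_simd crate); the wrapper compares vectors element by element, like slices:

use packed_simd::i64x4;

let a = i64x4::new(1, 2, 3, 4);
let b = i64x4::new(1, 2, 4, 0);
// The first differing lane (3 < 4) decides the ordering.
assert!(a.lex_ord() < b.lex_ord());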

impl Simd<[i64; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[u64; 4]>[src]

pub const fn new(x0: u64, x1: u64, x2: u64, x3: u64) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u64) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u64[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u64[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u64; 4]>[src]

pub fn rotate_left(self, n: u64x4) -> u64x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u64x4) -> u64x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u64; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u64; 4]>[src]

pub fn wrapping_sum(self) -> u64[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

pub fn wrapping_product(self) -> u64[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

impl Simd<[u64; 4]>[src]

pub fn max_element(self) -> u64[src]

Largest vector element value.

pub fn min_element(self) -> u64[src]

Smallest vector element value.

impl Simd<[u64; 4]>[src]

pub fn and(self) -> u64[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u64[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u64[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u64; 4]>[src]

pub fn from_slice_aligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u64; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u64; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u64; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u64; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[u64; 4]>[src]

pub fn eq(self, other: Self) -> m64x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u64; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u64x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u64; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u64x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[u64; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[f64; 4]>[src]

pub const fn new(x0: f64, x1: f64, x2: f64, x3: f64) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: f64) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> f64[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> f64[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[f64; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[f64; 4]>[src]

pub fn sum(self) -> f64[src]

Horizontal sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

pub fn product(self) -> f64[src]

Horizontal product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.
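A floating-point reduction sketch (assuming the packed_simd crate):

use packed_simd::f64x4;

let x = f64x4::new(1.0, 2.0, 3.0, 4.0);
assert_eq!(x.sum(), 10.0);
// NaN propagates through the reduction:
assert!(f64x4::new(1.0, f64::NAN, 3.0, 4.0).sum().is_nan());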

impl Simd<[f64; 4]>[src]

pub fn max_element(self) -> f64[src]

Largest vector element value.

pub fn min_element(self) -> f64[src]

Smallest vector element value.

impl Simd<[f64; 4]>[src]

pub fn from_slice_aligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f64; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[f64; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[f64; 4]>[src]

pub const EPSILON: f64x4[src]

Machine epsilon value.

pub const MIN: f64x4[src]

Smallest finite value.

pub const MIN_POSITIVE: f64x4[src]

Smallest positive normal value.

pub const MAX: f64x4[src]

Largest finite value.

pub const NAN: f64x4[src]

Not a Number (NaN).

pub const INFINITY: f64x4[src]

Infinity (∞).

pub const NEG_INFINITY: f64x4[src]

Negative infinity (-∞).

pub const PI: f64x4[src]

Archimedes' constant (π)

pub const FRAC_PI_2: f64x4[src]

π/2

pub const FRAC_PI_3: f64x4[src]

π/3

pub const FRAC_PI_4: f64x4[src]

π/4

pub const FRAC_PI_6: f64x4[src]

π/6

pub const FRAC_PI_8: f64x4[src]

π/8

pub const FRAC_1_PI: f64x4[src]

1/π

pub const FRAC_2_PI: f64x4[src]

2/π

pub const FRAC_2_SQRT_PI: f64x4[src]

2/sqrt(π)

pub const SQRT_2: f64x4[src]

sqrt(2)

pub const FRAC_1_SQRT_2: f64x4[src]

1/sqrt(2)

pub const E: f64x4[src]

Euler's number (e)

pub const LOG2_E: f64x4[src]

log2(e)

pub const LOG10_E: f64x4[src]

log10(e)

pub const LN_2: f64x4[src]

ln(2)

pub const LN_10: f64x4[src]

ln(10)

impl Simd<[f64; 4]>[src]

pub fn is_nan(self) -> m64x4[src]

pub fn is_infinite(self) -> m64x4[src]

pub fn is_finite(self) -> m64x4[src]

impl Simd<[f64; 4]>[src]

pub fn abs(self) -> Self[src]

Absolute value.

impl Simd<[f64; 4]>[src]

pub fn cos(self) -> Self[src]

Cosine.

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

impl Simd<[f64; 4]>[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

impl Simd<[f64; 4]>[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

impl Simd<[f64; 4]>[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

impl Simd<[f64; 4]>[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

While fused multiply-add (fma) computes the intermediate product with infinite precision (rounding only once), mul_adde has at worst the same precision as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.
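A fused multiply-add sketch (assuming the packed_simd crate):

use packed_simd::f64x4;

let x = f64x4::splat(2.0);
let y = f64x4::splat(3.0);
let z = f64x4::splat(1.0);
// x * y + z per lane; the result is exact here, so the estimate variant would agree.
assert_eq!(x.mul_add(y, z), f64x4::splat(7.0));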

impl Simd<[f64; 4]>[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating-point power of x.

impl Simd<[f64; 4]>[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f64; 4]>[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f64; 4]>[src]

pub fn sin(self) -> Self[src]

Sine.

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

impl Simd<[f64; 4]>[src]

pub fn sqrt(self) -> Self[src]

impl Simd<[f64; 4]>[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

FIXME: The precision of the estimate is currently unspecified.

impl Simd<[f64; 4]>[src]

pub fn tanh(self) -> Self[src]

Tanh.

impl Simd<[f64; 4]>[src]

pub fn eq(self, other: Self) -> m64x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m64; 4]>[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m64; 4]>[src]

pub fn and(self) -> bool[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m64; 4]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m64; 4]>[src]

pub fn eq(self, other: Self) -> m64x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x4[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m64; 4]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m64; 4] as SimdArray>::NT>,
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m64; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m64x4>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m64; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m64x4>[src]

Returns a wrapper that implements Ord.

impl Simd<[m64; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[m64; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[i128; 2]>[src]

pub const fn new(x0: i128, x1: i128) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i128) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i128[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i128[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i128; 2]>[src]

pub fn rotate_left(self, n: i128x2) -> i128x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i128x2) -> i128x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i128; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i128; 2]>[src]

pub fn wrapping_sum(self) -> i128[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

pub fn wrapping_product(self) -> i128[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

impl Simd<[i128; 2]>[src]

pub fn max_element(self) -> i128[src]

Largest vector element value.

pub fn min_element(self) -> i128[src]

Smallest vector element value.

impl Simd<[i128; 2]>[src]

pub fn and(self) -> i128[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i128[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i128[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i128; 2]>[src]

pub fn from_slice_aligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i128; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i128; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i128; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i128; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[i128; 2]>[src]

pub fn eq(self, other: Self) -> m128x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m128x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m128x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m128x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m128x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m128x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i128; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i128x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i128; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i128x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[i128; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[u128; 2]>[src]

pub const fn new(x0: u128, x1: u128) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u128) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u128[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u128[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u128; 2]>[src]

pub fn rotate_left(self, n: u128x2) -> u128x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u128x2) -> u128x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u128; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u128; 2]>[src]

pub fn wrapping_sum(self) -> u128[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

pub fn wrapping_product(self) -> u128[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

impl Simd<[u128; 2]>[src]

pub fn max_element(self) -> u128[src]

Largest vector element value.

pub fn min_element(self) -> u128[src]

Smallest vector element value.

impl Simd<[u128; 2]>[src]

pub fn and(self) -> u128[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u128[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u128[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u128; 2]>[src]

pub fn from_slice_aligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u128; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u128; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u128; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u128; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[u128; 2]>[src]

pub fn eq(self, other: Self) -> m128x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m128x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m128x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m128x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m128x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m128x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u128; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u128x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u128; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u128x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[u128; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[m128; 2]>[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m128; 2]>[src]

pub fn and(self) -> bool[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m128; 2]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m128; 2]>[src]

pub fn eq(self, other: Self) -> m128x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m128x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m128x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m128x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m128x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m128x2[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m128; 2]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m128; 2] as SimdArray>::NT>,
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m128; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m128x2>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m128; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m128x2>[src]

Returns a wrapper that implements Ord.

impl Simd<[m128; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[m128; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[i8; 64]>[src]

pub const fn new(
    x0: i8,
    x1: i8,
    x2: i8,
    x3: i8,
    x4: i8,
    x5: i8,
    x6: i8,
    x7: i8,
    x8: i8,
    x9: i8,
    x10: i8,
    x11: i8,
    x12: i8,
    x13: i8,
    x14: i8,
    x15: i8,
    x16: i8,
    x17: i8,
    x18: i8,
    x19: i8,
    x20: i8,
    x21: i8,
    x22: i8,
    x23: i8,
    x24: i8,
    x25: i8,
    x26: i8,
    x27: i8,
    x28: i8,
    x29: i8,
    x30: i8,
    x31: i8,
    x32: i8,
    x33: i8,
    x34: i8,
    x35: i8,
    x36: i8,
    x37: i8,
    x38: i8,
    x39: i8,
    x40: i8,
    x41: i8,
    x42: i8,
    x43: i8,
    x44: i8,
    x45: i8,
    x46: i8,
    x47: i8,
    x48: i8,
    x49: i8,
    x50: i8,
    x51: i8,
    x52: i8,
    x53: i8,
    x54: i8,
    x55: i8,
    x56: i8,
    x57: i8,
    x58: i8,
    x59: i8,
    x60: i8,
    x61: i8,
    x62: i8,
    x63: i8
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i8; 64]>[src]

pub fn rotate_left(self, n: i8x64) -> i8x64[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i8x64) -> i8x64[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i8; 64]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i8; 64]>[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around; that is, it returns the mathematical result modulo 2^b, where b is the bit width of the element type.

impl Simd<[i8; 64]>[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

pub fn min_element(self) -> i8[src]

Smallest vector element value.

impl Simd<[i8; 64]>[src]

pub fn and(self) -> i8[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i8[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i8[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i8; 64]>[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i8; 64]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i8; 64]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i8; 64]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i8; 64]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[i8; 64]>[src]

pub fn eq(self, other: Self) -> m8x64[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x64[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x64[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x64[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x64[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x64[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i8; 64]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x64>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i8; 64]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x64>[src]

Returns a wrapper that implements Ord.

impl Simd<[i8; 64]>[src]

pub fn bitmask(self) -> u64[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 64 lanes, the bits that do not correspond to any vector lane are cleared.

impl Simd<[u8; 64]>[src]

pub const fn new(
    x0: u8,
    x1: u8,
    x2: u8,
    x3: u8,
    x4: u8,
    x5: u8,
    x6: u8,
    x7: u8,
    x8: u8,
    x9: u8,
    x10: u8,
    x11: u8,
    x12: u8,
    x13: u8,
    x14: u8,
    x15: u8,
    x16: u8,
    x17: u8,
    x18: u8,
    x19: u8,
    x20: u8,
    x21: u8,
    x22: u8,
    x23: u8,
    x24: u8,
    x25: u8,
    x26: u8,
    x27: u8,
    x28: u8,
    x29: u8,
    x30: u8,
    x31: u8,
    x32: u8,
    x33: u8,
    x34: u8,
    x35: u8,
    x36: u8,
    x37: u8,
    x38: u8,
    x39: u8,
    x40: u8,
    x41: u8,
    x42: u8,
    x43: u8,
    x44: u8,
    x45: u8,
    x46: u8,
    x47: u8,
    x48: u8,
    x49: u8,
    x50: u8,
    x51: u8,
    x52: u8,
    x53: u8,
    x54: u8,
    x55: u8,
    x56: u8,
    x57: u8,
    x58: u8,
    x59: u8,
    x60: u8,
    x61: u8,
    x62: u8,
    x63: u8
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.
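
For example (a minimal sketch assuming packed_simd is in scope) — note that replace returns a new vector rather than mutating in place:

use packed_simd::u8x64;

let v = u8x64::splat(0);
let w = v.replace(3, 42);
assert_eq!(w.extract(3), 42);
assert_eq!(v.extract(3), 0); // the original vector is unchanged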

impl Simd<[u8; 64]>[src]

pub fn rotate_left(self, n: u8x64) -> u8x64[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u8x64) -> u8x64[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
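
A short sketch of the wrap-around behavior (illustrative, assuming packed_simd is in scope):

use packed_simd::u8x64;

let x = u8x64::splat(0b1000_0001);
// the bit shifted out at one end reappears at the other:
assert_eq!(x.rotate_left(u8x64::splat(1)), u8x64::splat(0b0000_0011));
assert_eq!(x.rotate_right(u8x64::splat(1)), u8x64::splat(0b1100_0000));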

impl Simd<[u8; 64]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u8; 64]>[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the lane type.

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the lane type.
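
For instance (a minimal sketch assuming packed_simd is in scope), 64 lanes of 4 sum to 256, which wraps to 0 modulo 2^8:

use packed_simd::u8x64;

assert_eq!(u8x64::splat(1).wrapping_sum(), 64);
assert_eq!(u8x64::splat(4).wrapping_sum(), 0); // 256 wraps around to 0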

impl Simd<[u8; 64]>[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

pub fn min_element(self) -> u8[src]

Smallest vector element value.

impl Simd<[u8; 64]>[src]

pub fn and(self) -> u8[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u8[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u8[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u8; 64]>[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.
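
For example (a minimal sketch assuming packed_simd is in scope; the unaligned variant only requires the slice to be long enough):

use packed_simd::u8x64;

let buf = [7u8; 64];
let v = u8x64::from_slice_unaligned(&buf);
assert_eq!(v, u8x64::splat(7));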

impl Simd<[u8; 64]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u8; 64]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u8; 64]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u8; 64]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[u8; 64]>[src]

pub fn eq(self, other: Self) -> m8x64[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x64[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x64[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x64[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x64[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x64[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u8; 64]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x64>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u8; 64]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x64>[src]

Returns a wrapper that implements Ord.

impl Simd<[u8; 64]>[src]

pub fn bitmask(self) -> u64[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[m8; 64]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool,
    x16: bool,
    x17: bool,
    x18: bool,
    x19: bool,
    x20: bool,
    x21: bool,
    x22: bool,
    x23: bool,
    x24: bool,
    x25: bool,
    x26: bool,
    x27: bool,
    x28: bool,
    x29: bool,
    x30: bool,
    x31: bool,
    x32: bool,
    x33: bool,
    x34: bool,
    x35: bool,
    x36: bool,
    x37: bool,
    x38: bool,
    x39: bool,
    x40: bool,
    x41: bool,
    x42: bool,
    x43: bool,
    x44: bool,
    x45: bool,
    x46: bool,
    x47: bool,
    x48: bool,
    x49: bool,
    x50: bool,
    x51: bool,
    x52: bool,
    x53: bool,
    x54: bool,
    x55: bool,
    x56: bool,
    x57: bool,
    x58: bool,
    x59: bool,
    x60: bool,
    x61: bool,
    x62: bool,
    x63: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m8; 64]>[src]

pub fn and(self) -> bool[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m8; 64]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?
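
For example (a minimal sketch assuming packed_simd is in scope):

use packed_simd::m8x64;

let m = m8x64::splat(true);
assert!(m.all() && m.any() && !m.none());
let m2 = m.replace(0, false); // one lane false
assert!(!m2.all() && m2.any() && !m2.none());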

impl Simd<[m8; 64]>[src]

pub fn eq(self, other: Self) -> m8x64[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x64[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x64[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x64[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x64[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x64[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m8; 64]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 64] as SimdArray>::NT>,
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.
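
A small sketch of mask-based blending (illustrative, assuming packed_simd is in scope):

use packed_simd::{i8x64, m8x64};

let mask = m8x64::splat(false).replace(0, true);
let a = i8x64::splat(1);
let b = i8x64::splat(2);
// lane 0 is taken from `a`, every other lane from `b`:
assert_eq!(mask.select(a, b), i8x64::splat(2).replace(0, 1));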

impl Simd<[m8; 64]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x64>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m8; 64]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x64>[src]

Returns a wrapper that implements Ord.

impl Simd<[m8; 64]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[m8; 64]>[src]

pub fn bitmask(self) -> u64[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[i16; 32]>[src]

pub const fn new(
    x0: i16,
    x1: i16,
    x2: i16,
    x3: i16,
    x4: i16,
    x5: i16,
    x6: i16,
    x7: i16,
    x8: i16,
    x9: i16,
    x10: i16,
    x11: i16,
    x12: i16,
    x13: i16,
    x14: i16,
    x15: i16,
    x16: i16,
    x17: i16,
    x18: i16,
    x19: i16,
    x20: i16,
    x21: i16,
    x22: i16,
    x23: i16,
    x24: i16,
    x25: i16,
    x26: i16,
    x27: i16,
    x28: i16,
    x29: i16,
    x30: i16,
    x31: i16
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i16) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i16[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i16[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i16; 32]>[src]

pub fn rotate_left(self, n: i16x32) -> i16x32[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i16x32) -> i16x32[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i16; 32]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[i16; 32]>[src]

pub fn wrapping_sum(self) -> i16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the lane type.

pub fn wrapping_product(self) -> i16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the lane type.

impl Simd<[i16; 32]>[src]

pub fn max_element(self) -> i16[src]

Largest vector element value.

pub fn min_element(self) -> i16[src]

Smallest vector element value.

impl Simd<[i16; 32]>[src]

pub fn and(self) -> i16[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i16[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i16[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i16; 32]>[src]

pub fn from_slice_aligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i16; 32]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.
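
For example (a minimal sketch assuming packed_simd is in scope):

use packed_simd::i16x32;

let v = i16x32::splat(-5);
let mut out = [0i16; 32];
v.write_to_slice_unaligned(&mut out);
assert_eq!(out, [-5i16; 32]);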

impl Simd<[i16; 32]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i16; 32]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i16; 32]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[i16; 32]>[src]

pub fn eq(self, other: Self) -> m16x32[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x32[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x32[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x32[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x32[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x32[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i16; 32]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i16x32>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i16; 32]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i16x32>[src]

Returns a wrapper that implements Ord.

impl Simd<[i16; 32]>[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[u16; 32]>[src]

pub const fn new(
    x0: u16,
    x1: u16,
    x2: u16,
    x3: u16,
    x4: u16,
    x5: u16,
    x6: u16,
    x7: u16,
    x8: u16,
    x9: u16,
    x10: u16,
    x11: u16,
    x12: u16,
    x13: u16,
    x14: u16,
    x15: u16,
    x16: u16,
    x17: u16,
    x18: u16,
    x19: u16,
    x20: u16,
    x21: u16,
    x22: u16,
    x23: u16,
    x24: u16,
    x25: u16,
    x26: u16,
    x27: u16,
    x28: u16,
    x29: u16,
    x30: u16,
    x31: u16
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u16) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u16[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u16[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u16; 32]>[src]

pub fn rotate_left(self, n: u16x32) -> u16x32[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u16x32) -> u16x32[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u16; 32]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u16; 32]>[src]

pub fn wrapping_sum(self) -> u16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the lane type.

pub fn wrapping_product(self) -> u16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the lane type.

impl Simd<[u16; 32]>[src]

pub fn max_element(self) -> u16[src]

Largest vector element value.

pub fn min_element(self) -> u16[src]

Smallest vector element value.

impl Simd<[u16; 32]>[src]

pub fn and(self) -> u16[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u16[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u16[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u16; 32]>[src]

pub fn from_slice_aligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u16; 32]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u16; 32]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.
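
A sketch of how these operations relate (illustrative; it assumes swap_bytes reverses the bytes within each lane, which is what the per-lane endianness conversions require):

use packed_simd::u16x32;

let x = u16x32::splat(0x1234);
assert_eq!(x.swap_bytes(), u16x32::splat(0x3412));
// on a little endian target, converting to big endian swaps the bytes:
#[cfg(target_endian = "little")]
assert_eq!(x.to_be(), x.swap_bytes());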

impl Simd<[u16; 32]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u16; 32]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[u16; 32]>[src]

pub fn eq(self, other: Self) -> m16x32[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x32[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x32[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x32[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x32[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x32[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u16; 32]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u16x32>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u16; 32]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u16x32>[src]

Returns a wrapper that implements Ord.

impl Simd<[u16; 32]>[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[m16; 32]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool,
    x16: bool,
    x17: bool,
    x18: bool,
    x19: bool,
    x20: bool,
    x21: bool,
    x22: bool,
    x23: bool,
    x24: bool,
    x25: bool,
    x26: bool,
    x27: bool,
    x28: bool,
    x29: bool,
    x30: bool,
    x31: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[m16; 32]>[src]

pub fn and(self) -> bool[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[m16; 32]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl Simd<[m16; 32]>[src]

pub fn eq(self, other: Self) -> m16x32[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m16x32[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m16x32[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m16x32[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m16x32[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m16x32[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[m16; 32]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m16; 32] as SimdArray>::NT>,
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl Simd<[m16; 32]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m16x32>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[m16; 32]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m16x32>[src]

Returns a wrapper that implements Ord.

impl Simd<[m16; 32]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[m16; 32]>[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[i32; 16]>[src]

pub const fn new(
    x0: i32,
    x1: i32,
    x2: i32,
    x3: i32,
    x4: i32,
    x5: i32,
    x6: i32,
    x7: i32,
    x8: i32,
    x9: i32,
    x10: i32,
    x11: i32,
    x12: i32,
    x13: i32,
    x14: i32,
    x15: i32
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i32) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i32[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i32[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[i32; 16]>[src]

pub fn rotate_left(self, n: i32x16) -> i32x16[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i32x16) -> i32x16[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[i32; 16]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.
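
For example (a minimal sketch assuming packed_simd is in scope):

use packed_simd::i32x16;

let a = i32x16::splat(1).replace(0, -7);
let b = i32x16::splat(0);
assert_eq!(a.min(b), i32x16::splat(0).replace(0, -7));
assert_eq!(a.max(b), i32x16::splat(1).replace(0, 0));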

impl Simd<[i32; 16]>[src]

pub fn wrapping_sum(self) -> i32[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the lane type.

pub fn wrapping_product(self) -> i32[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the lane type.

impl Simd<[i32; 16]>[src]

pub fn max_element(self) -> i32[src]

Largest vector element value.

pub fn min_element(self) -> i32[src]

Smallest vector element value.

impl Simd<[i32; 16]>[src]

pub fn and(self) -> i32[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i32[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i32[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[i32; 16]>[src]

pub fn from_slice_aligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i32; 16]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[i32; 16]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[i32; 16]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[i32; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[i32; 16]>[src]

pub fn eq(self, other: Self) -> m32x16[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x16[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x16[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x16[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x16[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x16[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[i32; 16]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i32x16>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[i32; 16]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i32x16>[src]

Returns a wrapper that implements Ord.

impl Simd<[i32; 16]>[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[u32; 16]>[src]

pub const fn new(
    x0: u32,
    x1: u32,
    x2: u32,
    x3: u32,
    x4: u32,
    x5: u32,
    x6: u32,
    x7: u32,
    x8: u32,
    x9: u32,
    x10: u32,
    x11: u32,
    x12: u32,
    x13: u32,
    x14: u32,
    x15: u32
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u32) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u32[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u32[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[u32; 16]>[src]

pub fn rotate_left(self, n: u32x16) -> u32x16[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u32x16) -> u32x16[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl Simd<[u32; 16]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[u32; 16]>[src]

pub fn wrapping_sum(self) -> u32[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the lane type.

pub fn wrapping_product(self) -> u32[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the lane type.

impl Simd<[u32; 16]>[src]

pub fn max_element(self) -> u32[src]

Largest vector element value.

pub fn min_element(self) -> u32[src]

Smallest vector element value.

impl Simd<[u32; 16]>[src]

pub fn and(self) -> u32[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u32[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u32[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl Simd<[u32; 16]>[src]

pub fn from_slice_aligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u32; 16]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl Simd<[u32; 16]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl Simd<[u32; 16]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl Simd<[u32; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl Simd<[u32; 16]>[src]

pub fn eq(self, other: Self) -> m32x16[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m32x16[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m32x16[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m32x16[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m32x16[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m32x16[src]

Lane-wise greater-than-or-equals comparison.

impl Simd<[u32; 16]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u32x16>[src]

Returns a wrapper that implements PartialOrd.

impl Simd<[u32; 16]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u32x16>[src]

Returns a wrapper that implements Ord.

impl Simd<[u32; 16]>[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

impl Simd<[f32; 16]>[src]

pub const fn new(
    x0: f32,
    x1: f32,
    x2: f32,
    x3: f32,
    x4: f32,
    x5: f32,
    x6: f32,
    x7: f32,
    x8: f32,
    x9: f32,
    x10: f32,
    x11: f32,
    x12: f32,
    x13: f32,
    x14: f32,
    x15: f32
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: f32) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> f32[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> f32[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl Simd<[f32; 16]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl Simd<[f32; 16]>[src]

pub fn sum(self) -> f32[src]

Horizontal sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

pub fn product(self) -> f32[src]

Horizontal product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.
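
For example (a minimal sketch assuming packed_simd is in scope):

use packed_simd::f32x16;

assert_eq!(f32x16::splat(0.5).sum(), 8.0); // 16 * 0.5, exactly representable
let v = f32x16::splat(1.0).replace(0, std::f32::NAN);
assert!(v.sum().is_nan()); // NaN propagates through the reduction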

impl Simd<[f32; 16]>[src]

pub fn max_element(self) -> f32[src]

Largest vector element value.

+

pub fn min_element(self) -> f32[src]

Smallest vector element value.

+

impl Simd<[f32; 16]>[src]

pub fn from_slice_aligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[f32; 16]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[f32; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[f32; 16]>[src]

pub const EPSILON: f32x16[src]

Machine epsilon value.

+

pub const MIN: f32x16[src]

Smallest finite value.

+

pub const MIN_POSITIVE: f32x16[src]

Smallest positive normal value.

+

pub const MAX: f32x16[src]

Largest finite value.

+

pub const NAN: f32x16[src]

Not a Number (NaN).

+

pub const INFINITY: f32x16[src]

Infinity (∞).

+

pub const NEG_INFINITY: f32x16[src]

Negative infinity (-∞).

+

pub const PI: f32x16[src]

Archimedes' constant (π)

+

pub const FRAC_PI_2: f32x16[src]

π/2

+

pub const FRAC_PI_3: f32x16[src]

π/3

+

pub const FRAC_PI_4: f32x16[src]

π/4

+

pub const FRAC_PI_6: f32x16[src]

π/6

+

pub const FRAC_PI_8: f32x16[src]

π/8

+

pub const FRAC_1_PI: f32x16[src]

1/π

+

pub const FRAC_2_PI: f32x16[src]

2/π

+

pub const FRAC_2_SQRT_PI: f32x16[src]

2/sqrt(π)

+

pub const SQRT_2: f32x16[src]

sqrt(2)

+

pub const FRAC_1_SQRT_2: f32x16[src]

1/sqrt(2)

+

pub const E: f32x16[src]

Euler's number (e)

+

pub const LOG2_E: f32x16[src]

log2(e)

+

pub const LOG10_E: f32x16[src]

log10(e)

+

pub const LN_2: f32x16[src]

ln(2)

+

pub const LN_10: f32x16[src]

ln(10)

+
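Each constant is the scalar value broadcast to all lanes; a minimal sketch assuming packed_simd:

use packed_simd::f32x16;

fn main() {
    // Every lane of the splatted constant equals the scalar constant.
    assert_eq!(f32x16::PI.extract(0), std::f32::consts::PI);
    assert!(f32x16::MIN_POSITIVE.gt(f32x16::splat(0.0)).all());
}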

impl Simd<[f32; 16]>[src]

pub fn is_nan(self) -> m32x16[src]

pub fn is_infinite(self) -> m32x16[src]

pub fn is_finite(self) -> m32x16[src]

impl Simd<[f32; 16]>[src]

pub fn abs(self) -> Self[src]

Absolute value.

+

impl Simd<[f32; 16]>[src]

pub fn cos(self) -> Self[src]

Cosine.

+

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

+

impl Simd<[f32; 16]>[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

+

impl Simd<[f32; 16]>[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

+

impl Simd<[f32; 16]>[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

+

impl Simd<[f32; 16]>[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

+

While fused multiply-add (fma) has infinite precision, mul_adde has at worst the same precision as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.

+
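A minimal sketch of the intended difference (assuming packed_simd; values chosen arbitrarily):

use packed_simd::f32x16;

fn main() {
    let (x, y, z) = (f32x16::splat(2.0), f32x16::splat(3.0), f32x16::splat(1.0));
    // mul_add rounds once; mul_adde may round once or twice,
    // whichever the target can do more efficiently.
    assert_eq!(x.mul_add(y, z), f32x16::splat(7.0));
}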

impl Simd<[f32; 16]>[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating-point power of x.

+

impl Simd<[f32; 16]>[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl Simd<[f32; 16]>[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

+

FIXME: The precision of the estimate is currently unspecified.

+

impl Simd<[f32; 16]>[src]

pub fn sin(self) -> Self[src]

Sine.

+

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

+

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

+

impl Simd<[f32; 16]>[src]

pub fn sqrt(self) -> Self[src]

impl Simd<[f32; 16]>[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl Simd<[f32; 16]>[src]

pub fn tanh(self) -> Self[src]

Tanh.

+

impl Simd<[f32; 16]>[src]

pub fn eq(self, other: Self) -> m32x16[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x16[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x16[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x16[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x16[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x16[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m32; 16]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[m32; 16]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[m32; 16]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+
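A minimal sketch of the three mask reductions (assuming packed_simd):

use packed_simd::m32x16;

fn main() {
    let m = m32x16::splat(true).replace(0, false);
    assert!(!m.all()); // lane 0 is false
    assert!(m.any());  // the other 15 lanes are true
    assert!(!m.none());
}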

impl Simd<[m32; 16]>[src]

pub fn eq(self, other: Self) -> m32x16[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x16[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x16[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x16[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x16[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x16[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m32; 16]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m32; 16] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+
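A minimal sketch of masked selection between two 32-bit vectors (assuming packed_simd):

use packed_simd::{f32x16, m32x16};

fn main() {
    let a = f32x16::splat(1.0);
    let b = f32x16::splat(-1.0);
    let m = m32x16::splat(false).replace(2, true);
    // Lane 2 comes from a, all other lanes from b.
    let r = m.select(a, b);
    assert_eq!(r.extract(2), 1.0);
    assert_eq!(r.extract(0), -1.0);
}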

impl Simd<[m32; 16]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m32x16>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[m32; 16]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m32x16>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[m32; 16]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[m32; 16]>[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

+
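A minimal sketch (assuming packed_simd, and assuming that lane i maps to bit i of the result, as with x86 movemask-style instructions):

use packed_simd::m32x16;

fn main() {
    let m = m32x16::splat(false).replace(0, true).replace(15, true);
    // Bits 0 and 15 of the u16 are set.
    assert_eq!(m.bitmask(), 0b1000_0000_0000_0001);
}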

impl Simd<[i64; 8]>[src]

pub const fn new(
    x0: i64,
    x1: i64,
    x2: i64,
    x3: i64,
    x4: i64,
    x5: i64,
    x6: i64,
    x7: i64
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i64) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i64[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i64[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+
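A minimal sketch of the checked accessors (assuming packed_simd):

use packed_simd::i64x8;

fn main() {
    let v = i64x8::splat(0);
    // replace returns a new vector; the original is unchanged.
    let w = v.replace(3, 42);
    assert_eq!(v.extract(3), 0);
    assert_eq!(w.extract(3), 42);
}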

impl Simd<[i64; 8]>[src]

pub fn rotate_left(self, n: i64x8) -> i64x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: i64x8) -> i64x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+
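A minimal sketch contrasting a bit-rotate with a shift (assuming packed_simd):

use packed_simd::i64x8;

fn main() {
    // Only the sign bit (the MSB) is set in every lane.
    let msb = i64x8::splat(i64::MIN);
    // Rotating left by one wraps the MSB around to bit 0,
    // whereas << would discard it.
    assert_eq!(msb.rotate_left(i64x8::splat(1)), i64x8::splat(1));
}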

impl Simd<[i64; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl Simd<[i64; 8]>[src]

pub fn wrapping_sum(self) -> i64[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^b, where b is the bit width of the element type.

+

pub fn wrapping_product(self) -> i64[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^b, where b is the bit width of the element type.

+
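A minimal sketch of the wrapping behavior (assuming packed_simd):

use packed_simd::u64x8;

fn main() {
    assert_eq!(u64x8::splat(1).wrapping_sum(), 8);
    // Overflow wraps around at the boundary of u64: MAX + 1 == 0.
    let v = u64x8::splat(0).replace(0, u64::MAX).replace(1, 1);
    assert_eq!(v.wrapping_sum(), 0);
}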

impl Simd<[i64; 8]>[src]

pub fn max_element(self) -> i64[src]

Largest vector element value.

+

pub fn min_element(self) -> i64[src]

Smallest vector element value.

+

impl Simd<[i64; 8]>[src]

pub fn and(self) -> i64[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> i64[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> i64[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[i64; 8]>[src]

pub fn from_slice_aligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i64; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i64; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+
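A minimal sketch of the round-trip property (assuming packed_simd):

use packed_simd::i64x8;

fn main() {
    let v = i64x8::splat(0x0123_4567_89ab_cdef);
    // to_le/from_le are inverses regardless of the target's endianness.
    assert_eq!(i64x8::from_le(v.to_le()), v);
    // swap_bytes applied twice is the identity.
    assert_eq!(v.swap_bytes().swap_bytes(), v);
}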

impl Simd<[i64; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+
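A minimal sketch of the lane-wise bit counts (assuming packed_simd):

use packed_simd::i64x8;

fn main() {
    let v = i64x8::splat(0b1011);
    assert_eq!(v.count_ones(), i64x8::splat(3));
    // 64 bits per lane, of which the low 4 hold 0b1011.
    assert_eq!(v.leading_zeros(), i64x8::splat(60));
    assert_eq!(v.trailing_zeros(), i64x8::splat(0));
}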

impl Simd<[i64; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[i64; 8]>[src]

pub fn eq(self, other: Self) -> m64x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m64x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m64x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m64x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m64x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m64x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[i64; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i64x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[i64; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i64x8>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[i64; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[u64; 8]>[src]

pub const fn new(
    x0: u64,
    x1: u64,
    x2: u64,
    x3: u64,
    x4: u64,
    x5: u64,
    x6: u64,
    x7: u64
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u64) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> u64[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u64[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[u64; 8]>[src]

pub fn rotate_left(self, n: u64x8) -> u64x8[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: u64x8) -> u64x8[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl Simd<[u64; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl Simd<[u64; 8]>[src]

pub fn wrapping_sum(self) -> u64[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^b, where b is the bit width of the element type.

+

pub fn wrapping_product(self) -> u64[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^b, where b is the bit width of the element type.

+

impl Simd<[u64; 8]>[src]

pub fn max_element(self) -> u64[src]

Largest vector element value.

+

pub fn min_element(self) -> u64[src]

Smallest vector element value.

+

impl Simd<[u64; 8]>[src]

pub fn and(self) -> u64[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> u64[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> u64[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl Simd<[u64; 8]>[src]

pub fn from_slice_aligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u64; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u64; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl Simd<[u64; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl Simd<[u64; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[u64; 8]>[src]

pub fn eq(self, other: Self) -> m64x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m64x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m64x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m64x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m64x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m64x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[u64; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u64x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[u64; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u64x8>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[u64; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[f64; 8]>[src]

pub const fn new(
    x0: f64,
    x1: f64,
    x2: f64,
    x3: f64,
    x4: f64,
    x5: f64,
    x6: f64,
    x7: f64
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: f64) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> f64[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> f64[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[f64; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl Simd<[f64; 8]>[src]

pub fn sum(self) -> f64[src]

Horizontal sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

+

pub fn product(self) -> f64[src]

Horizontal product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If one of the vector elements is NaN, the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

+

impl Simd<[f64; 8]>[src]

pub fn max_element(self) -> f64[src]

Largest vector element value.

+

pub fn min_element(self) -> f64[src]

Smallest vector element value.

+

impl Simd<[f64; 8]>[src]

pub fn from_slice_aligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[f64; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[f64; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[f64; 8]>[src]

pub const EPSILON: f64x8[src]

Machine epsilon value.

+

pub const MIN: f64x8[src]

Smallest finite value.

+

pub const MIN_POSITIVE: f64x8[src]

Smallest positive normal value.

+

pub const MAX: f64x8[src]

Largest finite value.

+

pub const NAN: f64x8[src]

Not a Number (NaN).

+

pub const INFINITY: f64x8[src]

Infinity (∞).

+

pub const NEG_INFINITY: f64x8[src]

Negative infinity (-∞).

+

pub const PI: f64x8[src]

Archimedes' constant (π)

+

pub const FRAC_PI_2: f64x8[src]

π/2

+

pub const FRAC_PI_3: f64x8[src]

π/3

+

pub const FRAC_PI_4: f64x8[src]

π/4

+

pub const FRAC_PI_6: f64x8[src]

π/6

+

pub const FRAC_PI_8: f64x8[src]

π/8

+

pub const FRAC_1_PI: f64x8[src]

1/π

+

pub const FRAC_2_PI: f64x8[src]

2/π

+

pub const FRAC_2_SQRT_PI: f64x8[src]

2/sqrt(π)

+

pub const SQRT_2: f64x8[src]

sqrt(2)

+

pub const FRAC_1_SQRT_2: f64x8[src]

1/sqrt(2)

+

pub const E: f64x8[src]

Euler's number (e)

+

pub const LOG2_E: f64x8[src]

log2(e)

+

pub const LOG10_E: f64x8[src]

log10(e)

+

pub const LN_2: f64x8[src]

ln(2)

+

pub const LN_10: f64x8[src]

ln(10)

+

impl Simd<[f64; 8]>[src]

pub fn is_nan(self) -> m64x8[src]

pub fn is_infinite(self) -> m64x8[src]

pub fn is_finite(self) -> m64x8[src]

impl Simd<[f64; 8]>[src]

pub fn abs(self) -> Self[src]

Absolute value.

+

impl Simd<[f64; 8]>[src]

pub fn cos(self) -> Self[src]

Cosine.

+

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

+

impl Simd<[f64; 8]>[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

+

impl Simd<[f64; 8]>[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

+

impl Simd<[f64; 8]>[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

+

impl Simd<[f64; 8]>[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

+

While fused multiply-add (fma) has infinite precision, mul_adde has at worst the same precision as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.

+

impl Simd<[f64; 8]>[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating-point power of x.

+

impl Simd<[f64; 8]>[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl Simd<[f64; 8]>[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

+

FIXME: The precision of the estimate is currently unspecified.

+

impl Simd<[f64; 8]>[src]

pub fn sin(self) -> Self[src]

Sine.

+

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

+

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

+

impl Simd<[f64; 8]>[src]

pub fn sqrt(self) -> Self[src]

impl Simd<[f64; 8]>[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl Simd<[f64; 8]>[src]

pub fn tanh(self) -> Self[src]

Tanh.

+

impl Simd<[f64; 8]>[src]

pub fn eq(self, other: Self) -> m64x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m64x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m64x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m64x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m64x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m64x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m64; 8]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[m64; 8]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl Simd<[m64; 8]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl Simd<[m64; 8]>[src]

pub fn eq(self, other: Self) -> m64x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m64x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m64x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m64x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m64x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m64x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m64; 8]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m64; 8] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain +the values of a. The remaining lanes contain the values of +b.

+

impl Simd<[m64; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m64x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[m64; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m64x8>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[m64; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[m64; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[i128; 4]>[src]

pub const fn new(x0: i128, x1: i128, x2: i128, x3: i128) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i128) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> i128[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i128[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[i128; 4]>[src]

pub fn rotate_left(self, n: i128x4) -> i128x4[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: i128x4) -> i128x4[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl Simd<[i128; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl Simd<[i128; 4]>[src]

pub fn wrapping_sum(self) -> i128[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^b, where b is the bit width of the element type.

+

pub fn wrapping_product(self) -> i128[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^b, where b is the bit width of the element type.

+

impl Simd<[i128; 4]>[src]

pub fn max_element(self) -> i128[src]

Largest vector element value.

+

pub fn min_element(self) -> i128[src]

Smallest vector element value.

+

impl Simd<[i128; 4]>[src]

pub fn and(self) -> i128[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> i128[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> i128[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl Simd<[i128; 4]>[src]

pub fn from_slice_aligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i128; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[i128; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl Simd<[i128; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl Simd<[i128; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[i128; 4]>[src]

pub fn eq(self, other: Self) -> m128x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m128x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m128x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m128x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m128x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m128x4[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[i128; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i128x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[i128; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i128x4>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[i128; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[u128; 4]>[src]

pub const fn new(x0: u128, x1: u128, x2: u128, x3: u128) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u128) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> u128[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u128[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[u128; 4]>[src]

pub fn rotate_left(self, n: u128x4) -> u128x4[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: u128x4) -> u128x4[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl Simd<[u128; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl Simd<[u128; 4]>[src]

pub fn wrapping_sum(self) -> u128[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^b, where b is the bit width of the element type.

+

pub fn wrapping_product(self) -> u128[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, the result wraps around: the returned value is the mathematical result modulo 2^b, where b is the bit width of the element type.

+

impl Simd<[u128; 4]>[src]

pub fn max_element(self) -> u128[src]

Largest vector element value.

+

pub fn min_element(self) -> u128[src]

Smallest vector element value.

+

impl Simd<[u128; 4]>[src]

pub fn and(self) -> u128[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> u128[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> u128[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl Simd<[u128; 4]>[src]

pub fn from_slice_aligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u128; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[u128; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl Simd<[u128; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl Simd<[u128; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[u128; 4]>[src]

pub fn eq(self, other: Self) -> m128x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m128x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m128x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m128x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m128x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m128x4[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[u128; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u128x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[u128; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u128x4>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[u128; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[m128; 4]>[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[m128; 4]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl Simd<[m128; 4]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl Simd<[m128; 4]>[src]

pub fn eq(self, other: Self) -> m128x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m128x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m128x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m128x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m128x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m128x4[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[m128; 4]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m128; 4] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain +the values of a. The remaining lanes contain the values of +b.

+

impl Simd<[m128; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m128x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[m128; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m128x4>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[m128; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[m128; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[isize; 2]>[src]

pub const fn new(x0: isize, x1: isize) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: isize) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> isize[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> isize[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[isize; 2]>[src]

pub fn rotate_left(self, n: isizex2) -> isizex2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: isizex2) -> isizex2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+
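For example, each lane is rotated by the amount held in the corresponding lane of n (a minimal sketch):

let v = isizex2::splat(0b0001);
let n = isizex2::new(1, 3);              // per-lane rotate amounts
assert_eq!(v.rotate_left(n), isizex2::new(0b0010, 0b1000));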

impl Simd<[isize; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl Simd<[isize; 2]>[src]

pub fn wrapping_sum(self) -> isize[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

+

pub fn wrapping_product(self) -> isize[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

+
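A brief sketch of the wrapping semantics; the overflow case holds for any two's-complement isize width:

let v = isizex2::new(3, 4);
assert_eq!(v.wrapping_sum(), 7);
assert_eq!(v.wrapping_product(), 12);
// MAX + MAX overflows once and wraps around to -2 modulo 2^n:
assert_eq!(isizex2::splat(isize::MAX).wrapping_sum(), -2);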

impl Simd<[isize; 2]>[src]

pub fn max_element(self) -> isize[src]

Largest vector element value.

+

pub fn min_element(self) -> isize[src]

Smallest vector element value.

+

impl Simd<[isize; 2]>[src]

pub fn and(self) -> isize[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> isize[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> isize[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[isize; 2]>[src]

pub fn from_slice_aligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[isize; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
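A round-trip sketch using the unaligned variants, which only require slice.len() >= Self::lanes():

let data = [1isize, 2, 3];
let v = isizex2::from_slice_unaligned(&data);   // reads the first two elements
let mut out = [0isize; 2];
v.write_to_slice_unaligned(&mut out);
assert_eq!(out, [1, 2]);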

impl Simd<[isize; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+
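On a little-endian target the conversions behave as follows (a minimal sketch):

let v = isizex2::new(1, 2);
if cfg!(target_endian = "little") {
    assert_eq!(v.to_le(), v);                   // already little endian: no-op
    assert_eq!(v.to_be(), v.swap_bytes());      // big endian requires a byte swap
}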

impl Simd<[isize; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+
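For instance, a sketch assuming a 64-bit isize:

let v = isizex2::new(0b1011, 0);
assert_eq!(v.count_ones(), isizex2::new(3, 0));
assert_eq!(v.trailing_zeros(), isizex2::new(0, 64)); // all 64 bits of 0 are zero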

impl Simd<[isize; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[isize; 2]>[src]

pub fn eq(self, other: Self) -> msizex2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex2[src]

Lane-wise greater-than-or-equals comparison.

+
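Each comparison returns a mask vector with one boolean per lane, for example:

let a = isizex2::new(1, 4);
let b = isizex2::new(2, 3);
assert_eq!(a.lt(b), msizex2::new(true, false)); // [1 < 2, 4 < 3]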

impl Simd<[isize; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<isizex2>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[isize; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<isizex2>[src]

Returns a wrapper that implements Ord.

+
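The wrappers compare vectors lexicographically, lane 0 first. A sketch, assuming the wrappers' PartialOrd/Ord impls are used through the ordinary comparison operators:

let a = isizex2::new(1, 2);
let b = isizex2::new(1, 3);
assert!(a.lex_ord() < b.lex_ord());             // lane 0 ties, lane 1 decides
assert!(a.partial_lex_ord() < b.partial_lex_ord());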

impl Simd<[isize; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[usize; 2]>[src]

pub const fn new(x0: usize, x1: usize) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: usize) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> usize[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> usize[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[usize; 2]>[src]

pub fn rotate_left(self, n: usizex2) -> usizex2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: usizex2) -> usizex2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl Simd<[usize; 2]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl Simd<[usize; 2]>[src]

pub fn wrapping_sum(self) -> usize[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

+

pub fn wrapping_product(self) -> usize[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

+

impl Simd<[usize; 2]>[src]

pub fn max_element(self) -> usize[src]

Largest vector element value.

+

pub fn min_element(self) -> usize[src]

Smallest vector element value.

+

impl Simd<[usize; 2]>[src]

pub fn and(self) -> usize[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> usize[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> usize[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[usize; 2]>[src]

pub fn from_slice_aligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[usize; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[usize; 2]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl Simd<[usize; 2]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl Simd<[usize; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[usize; 2]>[src]

pub fn eq(self, other: Self) -> msizex2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex2[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[usize; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<usizex2>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[usize; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<usizex2>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[usize; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[msize; 2]>[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[msize; 2]>[src]

pub fn and(self) -> bool[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[msize; 2]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+
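For a mask with mixed lanes, the three reductions relate as follows:

let m = msizex2::new(true, false);
assert!(m.any());    // at least one lane is true
assert!(!m.all());   // not every lane is true
assert!(!m.none());  // none() is the negation of any()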

impl Simd<[msize; 2]>[src]

pub fn eq(self, other: Self) -> msizex2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex2[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[msize; 2]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[msize; 2] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+
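A minimal sketch of lane selection; any vector type with the same lane count works for a and b:

let mask = msizex2::new(true, false);
let a = isizex2::new(1, 2);
let b = isizex2::new(3, 4);
assert_eq!(mask.select(a, b), isizex2::new(1, 4)); // lane 0 from a, lane 1 from b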

impl Simd<[msize; 2]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<msizex2>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[msize; 2]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<msizex2>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[msize; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[msize; 2]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[isize; 4]>[src]

pub const fn new(x0: isize, x1: isize, x2: isize, x3: isize) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: isize) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> isize[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> isize[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[isize; 4]>[src]

pub fn rotate_left(self, n: isizex4) -> isizex4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: isizex4) -> isizex4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl Simd<[isize; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl Simd<[isize; 4]>[src]

pub fn wrapping_sum(self) -> isize[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

+

pub fn wrapping_product(self) -> isize[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

+

impl Simd<[isize; 4]>[src]

pub fn max_element(self) -> isize[src]

Largest vector element value.

+

pub fn min_element(self) -> isize[src]

Smallest vector element value.

+

impl Simd<[isize; 4]>[src]

pub fn and(self) -> isize[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> isize[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> isize[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[isize; 4]>[src]

pub fn from_slice_aligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[isize; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[isize; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl Simd<[isize; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl Simd<[isize; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[isize; 4]>[src]

pub fn eq(self, other: Self) -> msizex4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex4[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[isize; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<isizex4>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[isize; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<isizex4>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[isize; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[usize; 4]>[src]

pub const fn new(x0: usize, x1: usize, x2: usize, x3: usize) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: usize) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> usize[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> usize[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[usize; 4]>[src]

pub fn rotate_left(self, n: usizex4) -> usizex4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: usizex4) -> usizex4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl Simd<[usize; 4]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl Simd<[usize; 4]>[src]

pub fn wrapping_sum(self) -> usize[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

+

pub fn wrapping_product(self) -> usize[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

+

impl Simd<[usize; 4]>[src]

pub fn max_element(self) -> usize[src]

Largest vector element value.

+

pub fn min_element(self) -> usize[src]

Smallest vector element value.

+

impl Simd<[usize; 4]>[src]

pub fn and(self) -> usize[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> usize[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> usize[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[usize; 4]>[src]

pub fn from_slice_aligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[usize; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[usize; 4]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl Simd<[usize; 4]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl Simd<[usize; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[usize; 4]>[src]

pub fn eq(self, other: Self) -> msizex4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex4[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[usize; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<usizex4>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[usize; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<usizex4>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[usize; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[msize; 4]>[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[msize; 4]>[src]

pub fn and(self) -> bool[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[msize; 4]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl Simd<[msize; 4]>[src]

pub fn eq(self, other: Self) -> msizex4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex4[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[msize; 4]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[msize; 4] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+

impl Simd<[msize; 4]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<msizex4>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[msize; 4]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<msizex4>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[msize; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[msize; 4]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[isize; 8]>[src]

pub const fn new(
    x0: isize,
    x1: isize,
    x2: isize,
    x3: isize,
    x4: isize,
    x5: isize,
    x6: isize,
    x7: isize
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: isize) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> isize[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> isize[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[isize; 8]>[src]

pub fn rotate_left(self, n: isizex8) -> isizex8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: isizex8) -> isizex8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl Simd<[isize; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl Simd<[isize; 8]>[src]

pub fn wrapping_sum(self) -> isize[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

+

pub fn wrapping_product(self) -> isize[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

+

impl Simd<[isize; 8]>[src]

pub fn max_element(self) -> isize[src]

Largest vector element value.

+

pub fn min_element(self) -> isize[src]

Smallest vector element value.

+

impl Simd<[isize; 8]>[src]

pub fn and(self) -> isize[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> isize[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> isize[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[isize; 8]>[src]

pub fn from_slice_aligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[isize; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[isize; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl Simd<[isize; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl Simd<[isize; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[isize; 8]>[src]

pub fn eq(self, other: Self) -> msizex8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex8[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[isize; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<isizex8>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[isize; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<isizex8>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[isize; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[usize; 8]>[src]

pub const fn new(
    x0: usize,
    x1: usize,
    x2: usize,
    x3: usize,
    x4: usize,
    x5: usize,
    x6: usize,
    x7: usize
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: usize) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> usize[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> usize[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[usize; 8]>[src]

pub fn rotate_left(self, n: usizex8) -> usizex8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: usizex8) -> usizex8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl Simd<[usize; 8]>[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl Simd<[usize; 8]>[src]

pub fn wrapping_sum(self) -> usize[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

+

pub fn wrapping_product(self) -> usize[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows it returns the mathematical result modulo 2^n where n is the number of times it overflows.

+

impl Simd<[usize; 8]>[src]

pub fn max_element(self) -> usize[src]

Largest vector element value.

+

pub fn min_element(self) -> usize[src]

Smallest vector element value.

+

impl Simd<[usize; 8]>[src]

pub fn and(self) -> usize[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> usize[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> usize[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl Simd<[usize; 8]>[src]

pub fn from_slice_aligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[usize; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl Simd<[usize; 8]>[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl Simd<[usize; 8]>[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl Simd<[usize; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[usize; 8]>[src]

pub fn eq(self, other: Self) -> msizex8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex8[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[usize; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<usizex8>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[usize; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<usizex8>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[usize; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

impl Simd<[msize; 8]>[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = + "replace_unchecked does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl Simd<[msize; 8]>[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl Simd<[msize; 8]>[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+
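A sketch of the three reductions on one mask value (assuming the packed_simd crate name):

use packed_simd::msizex8;

fn main() {
    let m = msizex8::new(true, true, false, true, true, true, true, true);
    assert!(m.any());   // at least one lane is true
    assert!(!m.all());  // lane 2 is false
    assert!(!m.none()); // some lane is true
}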

impl Simd<[msize; 8]>[src]

pub fn eq(self, other: Self) -> msizex8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex8[src]

Lane-wise greater-than-or-equals comparison.

+

impl Simd<[msize; 8]>[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[msize; 8] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain +the values of a. The remaining lanes contain the values of +b.

+
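A minimal sketch of a mask-driven blend (assuming the packed_simd crate name):

use packed_simd::{msizex8, usizex8};

fn main() {
    let mask = msizex8::new(true, false, true, false, true, false, true, false);
    let a = usizex8::splat(1);
    let b = usizex8::splat(2);
    // True lanes come from `a`, false lanes from `b`.
    assert_eq!(mask.select(a, b), usizex8::new(1, 2, 1, 2, 1, 2, 1, 2));
}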

impl Simd<[msize; 8]>[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<msizex8>[src]

Returns a wrapper that implements PartialOrd.

+

impl Simd<[msize; 8]>[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<msizex8>[src]

Returns a wrapper that implements Ord.

+

impl Simd<[msize; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl Simd<[msize; 8]>[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

impl<T> Simd<[*const T; 2]>[src]

pub const fn new(x0: *const T, x1: *const T) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: *const T) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to +null.

+

pub fn is_null(self) -> msizex2[src]

Returns a mask that selects those lanes that contain null +pointers.

+
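A sketch of building a pointer vector and probing it for nulls; the cptrx2 alias for Simd<[*const T; 2]> is an assumption here, as is the packed_simd crate name:

use packed_simd::cptrx2;

fn main() {
    let x = 42i32;
    // Lane 0 points at `x`; lane 1 is null.
    let v = cptrx2::<i32>::new(&x as *const i32, core::ptr::null());
    let nulls = v.is_null(); // an msizex2 mask
    assert!(!nulls.extract(0));
    assert!(nulls.extract(1));
}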

pub fn extract(self, index: usize) -> *const T[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> *const T[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl<T> Simd<[*const T; 2]>[src]

pub fn eq(self, other: Self) -> msizex2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex2[src]

Lane-wise greater-than-or-equals comparison.

+

impl<T> Simd<[*const T; 2]>[src]

pub fn from_slice_aligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> Simd<[*const T; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> Simd<[*const T; 2]>[src]

pub unsafe fn offset(self, count: isizex2) -> Self[src]

Calculates the offset from a pointer.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is
Undefined Behavior:

  • Both the starting and resulting pointer must be either in
    bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around"
    the address space. That is, the infinite-precision sum, in
    bytes, must fit in a usize.

The compiler and standard library generally try to ensure
allocations never reach a size where an offset is a concern. For
instance, Vec and Box ensure they never allocate more than
isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize)
is always safe.

+

Most platforms fundamentally can't even construct such an
allocation. For instance, no known 64-bit platform can ever
serve a request for 2^63 bytes due to page-table limitations or
splitting the address space. However, some 32-bit and 16-bit
platforms may successfully serve a request for more than
isize::MAX bytes with things like Physical Address Extension.
As such, memory acquired directly from allocators or memory
mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+
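A sketch of an in-bounds lane-wise offset (same assumed cptrx2 alias as above; isizex2 is the count type from the signature):

use packed_simd::{cptrx2, isizex2};

fn main() {
    let data = [10i32, 20, 30, 40];
    let base = cptrx2::<i32>::splat(data.as_ptr());
    // Advance lane 0 by one element and lane 1 by three; both
    // results stay in bounds of `data`, as required above.
    let ptrs = unsafe { base.offset(isizex2::new(1, 3)) };
    unsafe {
        assert_eq!(*ptrs.extract(0), 20);
        assert_eq!(*ptrs.extract(1), 40);
    }
}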

pub fn wrapping_offset(self, count: isizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .offset(count) instead when possible, because +offset allows the compiler to optimize better.

+

pub unsafe fn offset_from(self, origin: Self) -> isizex2[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

This function is the inverse of offset.

+

Safety

+

If any of the following conditions are violated, the result is
Undefined Behavior:

  • Both the starting and other pointer must be either in bounds
    or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow
    an isize.

  • The distance between the pointers, in bytes, must be an exact
    multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around"
    the address space.

The compiler and standard library generally try to ensure +allocations never reach a size where an offset is a concern. For +instance, Vec and Box ensure they never allocate more than +isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) +is always safe.

+

Most platforms fundamentally can't even construct such an
allocation. For instance, no known 64-bit platform can ever
serve a request for 2^63 bytes due to page-table limitations or
splitting the address space. However, some 32-bit and 16-bit
platforms may successfully serve a request for more than
isize::MAX bytes with things like Physical Address Extension.
As such, memory acquired directly from allocators or memory
mapped files may be too large to handle with this function.

+

Consider using wrapping_offset_from instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset_from(self, origin: Self) -> isizex2[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

If the address difference between the two pointers is not a
multiple of mem::size_of::<T>() then the result of the
division is rounded towards zero.

+

Though this method is safe for any two pointers, note that its +result will be mostly useless if the two pointers aren't into +the same allocated object, for example if they point to two +different local variables.

+
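A sketch pairing wrapping_add with wrapping_offset_from (same alias assumptions as above):

use packed_simd::{cptrx2, isizex2, usizex2};

fn main() {
    let data = [0u8; 16];
    let origin = cptrx2::<u8>::splat(data.as_ptr());
    let later = origin.wrapping_add(usizex2::new(4, 9));
    // Distances come back in units of T (bytes, since T = u8).
    assert_eq!(later.wrapping_offset_from(origin), isizex2::new(4, 9));
}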

pub unsafe fn add(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is
Undefined Behavior:

  • Both the starting and resulting pointer must be either in
    bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around"
    the address space. That is, the infinite-precision sum must
    fit in a usize.

The compiler and standard library generally try to ensure
allocations never reach a size where an offset is a concern. For
instance, Vec and Box ensure they never allocate more than
isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always
safe.

+

Most platforms fundamentally can't even construct such an
allocation. For instance, no known 64-bit platform can ever
serve a request for 2^63 bytes due to page-table limitations or
splitting the address space. However, some 32-bit and 16-bit
platforms may successfully serve a request for more than
isize::MAX bytes with things like Physical Address Extension.
As such, memory acquired directly from allocators or memory
mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub unsafe fn sub(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is
Undefined Behavior:

  • Both the starting and resulting pointer must be either in
    bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around"
    the address space. That is, the infinite-precision sum must
    fit in a usize.

The compiler and standard library generally try to ensure
allocations never reach a size where an offset is a concern. For
instance, Vec and Box ensure they never allocate more than
isize::MAX bytes, so
vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an
allocation. For instance, no known 64-bit platform can ever
serve a request for 2^63 bytes due to page-table limitations or
splitting the address space. However, some 32-bit and 16-bit
platforms may successfully serve a request for more than
isize::MAX bytes with things like Physical Address Extension.
As such, memory acquired directly from allocators or memory
mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_add(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic. +(convenience for .wrapping_offset(count as isize))

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .add(count) instead when possible, because add +allows the compiler to optimize better.

+

pub fn wrapping_sub(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic
(convenience for .wrapping_offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .sub(count) instead when possible, because sub +allows the compiler to optimize better.

+

impl<T> Simd<[*const T; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl<T> Simd<[*const T; 2]> where
    [T; 2]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 2]>,
    value: Simd<[T; 2]>
) -> Simd<[T; 2]> where
    M: Mask,
    [M; 2]: SimdArray
[src]

Reads selected vector elements from memory.

+

Instantiates a new vector by reading the values from self for +those lanes whose mask is true, and using the elements of +value otherwise.

+

No memory is accessed for those lanes of self whose mask is +false.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The +pointers must be aligned to mem::align_of::<T>().

+
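A sketch of a masked gather that never touches the null lane (the cptrx2 and i32x2 alias names are assumptions):

use packed_simd::{cptrx2, i32x2, msizex2};

fn main() {
    let data = [7i32, 8];
    let ptrs = cptrx2::<i32>::new(&data[0] as *const i32, core::ptr::null());
    let mask = msizex2::new(true, false); // the null lane stays untouched
    let fallback = i32x2::splat(-1);
    let r = unsafe { ptrs.read(mask, fallback) };
    assert_eq!(r, i32x2::new(7, -1));
}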

impl<T> Simd<[*mut T; 2]>[src]

pub const fn new(x0: *mut T, x1: *mut T) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: *mut T) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to +null.

+

pub fn is_null(self) -> msizex2[src]

Returns a mask that selects those lanes that contain null +pointers.

+

pub fn extract(self, index: usize) -> *mut T[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> *mut T[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl<T> Simd<[*mut T; 2]>[src]

pub fn eq(self, other: Self) -> msizex2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex2[src]

Lane-wise greater-than-or-equals comparison.

+

impl<T> Simd<[*mut T; 2]>[src]

pub fn from_slice_aligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> Simd<[*mut T; 2]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> Simd<[*mut T; 2]>[src]

pub unsafe fn offset(self, count: isizex2) -> Self[src]

Calculates the offset from a pointer.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is
Undefined Behavior:

  • Both the starting and resulting pointer must be either in
    bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around"
    the address space. That is, the infinite-precision sum, in
    bytes, must fit in a usize.

The compiler and standard library generally try to ensure
allocations never reach a size where an offset is a concern. For
instance, Vec and Box ensure they never allocate more than
isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize)
is always safe.

+

Most platforms fundamentally can't even construct such an
allocation. For instance, no known 64-bit platform can ever
serve a request for 2^63 bytes due to page-table limitations or
splitting the address space. However, some 32-bit and 16-bit
platforms may successfully serve a request for more than
isize::MAX bytes with things like Physical Address Extension.
As such, memory acquired directly from allocators or memory
mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset(self, count: isizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .offset(count) instead when possible, because +offset allows the compiler to optimize better.

+

pub unsafe fn offset_from(self, origin: Self) -> isizex2[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

This function is the inverse of offset.

+

Safety

+

If any of the following conditions are violated, the result is
Undefined Behavior:

  • Both the starting and other pointer must be either in bounds
    or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow
    an isize.

  • The distance between the pointers, in bytes, must be an exact
    multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around"
    the address space.

The compiler and standard library generally try to ensure +allocations never reach a size where an offset is a concern. For +instance, Vec and Box ensure they never allocate more than +isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) +is always safe.

+

Most platforms fundamentally can't even construct such an
allocation. For instance, no known 64-bit platform can ever
serve a request for 2^63 bytes due to page-table limitations or
splitting the address space. However, some 32-bit and 16-bit
platforms may successfully serve a request for more than
isize::MAX bytes with things like Physical Address Extension.
As such, memory acquired directly from allocators or memory
mapped files may be too large to handle with this function.

+

Consider using wrapping_offset_from instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset_from(self, origin: Self) -> isizex2[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

If the address difference between the two pointers is not a
multiple of mem::size_of::<T>() then the result of the
division is rounded towards zero.

+

Though this method is safe for any two pointers, note that its +result will be mostly useless if the two pointers aren't into +the same allocated object, for example if they point to two +different local variables.

+

pub unsafe fn add(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is
Undefined Behavior:

  • Both the starting and resulting pointer must be either in
    bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around"
    the address space. That is, the infinite-precision sum must
    fit in a usize.

The compiler and standard library generally try to ensure
allocations never reach a size where an offset is a concern. For
instance, Vec and Box ensure they never allocate more than
isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always
safe.

+

Most platforms fundamentally can't even construct such an
allocation. For instance, no known 64-bit platform can ever
serve a request for 2^63 bytes due to page-table limitations or
splitting the address space. However, some 32-bit and 16-bit
platforms may successfully serve a request for more than
isize::MAX bytes with things like Physical Address Extension.
As such, memory acquired directly from allocators or memory
mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub unsafe fn sub(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is
Undefined Behavior:

  • Both the starting and resulting pointer must be either in
    bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around"
    the address space. That is, the infinite-precision sum must
    fit in a usize.

The compiler and standard library generally try to ensure
allocations never reach a size where an offset is a concern. For
instance, Vec and Box ensure they never allocate more than
isize::MAX bytes, so
vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an
allocation. For instance, no known 64-bit platform can ever
serve a request for 2^63 bytes due to page-table limitations or
splitting the address space. However, some 32-bit and 16-bit
platforms may successfully serve a request for more than
isize::MAX bytes with things like Physical Address Extension.
As such, memory acquired directly from allocators or memory
mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_add(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic. +(convenience for .wrapping_offset(count as isize))

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .add(count) instead when possible, because add +allows the compiler to optimize better.

+

pub fn wrapping_sub(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic
(convenience for .wrapping_offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .sub(count) instead when possible, because sub +allows the compiler to optimize better.

+

impl<T> Simd<[*mut T; 2]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl<T> Simd<[*mut T; 2]> where
    [T; 2]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 2]>,
    value: Simd<[T; 2]>
) -> Simd<[T; 2]> where
    M: Mask,
    [M; 2]: SimdArray
[src]

Reads selected vector elements from memory.

+

Instantiates a new vector by reading the values from self for +those lanes whose mask is true, and using the elements of +value otherwise.

+

No memory is accessed for those lanes of self whose mask is +false.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The +pointers must be aligned to mem::align_of::<T>().

+

impl<T> Simd<[*mut T; 2]> where
    [T; 2]: SimdArray
[src]

pub unsafe fn write<M>(self, mask: Simd<[M; 2]>, value: Simd<[T; 2]>) where
    M: Mask,
    [M; 2]: SimdArray
[src]

Writes selected vector elements to memory.

+

Writes the lanes of values for which the mask is true to +their corresponding memory addresses in self.

+

No memory is accessed for those lanes of self whose mask is +false.

+

Overlapping memory addresses of self are written to in order
from the least-significant to the most-significant element.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The +pointers must be aligned to mem::align_of::<T>().

+
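A sketch of the matching masked scatter through *mut T pointers (the mptrx2 and i32x2 alias names are assumptions):

use packed_simd::{i32x2, mptrx2, msizex2};

fn main() {
    let mut out = [0i32, 0];
    let ptrs = mptrx2::<i32>::new(&mut out[0] as *mut i32, core::ptr::null_mut());
    let mask = msizex2::new(true, false); // the null lane is skipped
    unsafe { ptrs.write(mask, i32x2::new(5, 6)) };
    assert_eq!(out, [5, 0]);
}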

impl<T> Simd<[*const T; 4]>[src]

pub const fn new(x0: *const T, x1: *const T, x2: *const T, x3: *const T) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: *const T) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to +null.

+

pub fn is_null(self) -> msizex4[src]

Returns a mask that selects those lanes that contain null +pointers.

+

pub fn extract(self, index: usize) -> *const T[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> *const T[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl<T> Simd<[*const T; 4]>[src]

pub fn eq(self, other: Self) -> msizex4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex4[src]

Lane-wise greater-than-or-equals comparison.

+

impl<T> Simd<[*const T; 4]>[src]

pub fn from_slice_aligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> Simd<[*const T; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> Simd<[*const T; 4]>[src]

pub unsafe fn offset(self, count: isizex4) -> Self[src]

Calculates the offset from a pointer.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is
Undefined Behavior:

  • Both the starting and resulting pointer must be either in
    bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around"
    the address space. That is, the infinite-precision sum, in
    bytes, must fit in a usize.

The compiler and standard library generally try to ensure
allocations never reach a size where an offset is a concern. For
instance, Vec and Box ensure they never allocate more than
isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize)
is always safe.

+

Most platforms fundamentally can't even construct such an
allocation. For instance, no known 64-bit platform can ever
serve a request for 2^63 bytes due to page-table limitations or
splitting the address space. However, some 32-bit and 16-bit
platforms may successfully serve a request for more than
isize::MAX bytes with things like Physical Address Extension.
As such, memory acquired directly from allocators or memory
mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset(self, count: isizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .offset(count) instead when possible, because +offset allows the compiler to optimize better.

+

pub unsafe fn offset_from(self, origin: Self) -> isizex4[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

This function is the inverse of offset.

+

Safety

+

If any of the following conditions are violated, the result is
Undefined Behavior:

  • Both the starting and other pointer must be either in bounds
    or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow
    an isize.

  • The distance between the pointers, in bytes, must be an exact
    multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around"
    the address space.

The compiler and standard library generally try to ensure +allocations never reach a size where an offset is a concern. For +instance, Vec and Box ensure they never allocate more than +isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) +is always safe.

+

Most platforms fundamentally can't even construct such an
allocation. For instance, no known 64-bit platform can ever
serve a request for 2^63 bytes due to page-table limitations or
splitting the address space. However, some 32-bit and 16-bit
platforms may successfully serve a request for more than
isize::MAX bytes with things like Physical Address Extension.
As such, memory acquired directly from allocators or memory
mapped files may be too large to handle with this function.

+

Consider using wrapping_offset_from instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset_from(self, origin: Self) -> isizex4[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

If the address difference between the two pointers is not a
multiple of mem::size_of::<T>() then the result of the
division is rounded towards zero.

+

Though this method is safe for any two pointers, note that its +result will be mostly useless if the two pointers aren't into +the same allocated object, for example if they point to two +different local variables.

+

pub unsafe fn add(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is
Undefined Behavior:

  • Both the starting and resulting pointer must be either in
    bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around"
    the address space. That is, the infinite-precision sum must
    fit in a usize.

The compiler and standard library generally try to ensure
allocations never reach a size where an offset is a concern. For
instance, Vec and Box ensure they never allocate more than
isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always
safe.

+

Most platforms fundamentally can't even construct such an
allocation. For instance, no known 64-bit platform can ever
serve a request for 2^63 bytes due to page-table limitations or
splitting the address space. However, some 32-bit and 16-bit
platforms may successfully serve a request for more than
isize::MAX bytes with things like Physical Address Extension.
As such, memory acquired directly from allocators or memory
mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub unsafe fn sub(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is
Undefined Behavior:

  • Both the starting and resulting pointer must be either in
    bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around"
    the address space. That is, the infinite-precision sum must
    fit in a usize.

The compiler and standard library generally try to ensure
allocations never reach a size where an offset is a concern. For
instance, Vec and Box ensure they never allocate more than
isize::MAX bytes, so
vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an
allocation. For instance, no known 64-bit platform can ever
serve a request for 2^63 bytes due to page-table limitations or
splitting the address space. However, some 32-bit and 16-bit
platforms may successfully serve a request for more than
isize::MAX bytes with things like Physical Address Extension.
As such, memory acquired directly from allocators or memory
mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_add(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic. +(convenience for .wrapping_offset(count as isize))

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .add(count) instead when possible, because add +allows the compiler to optimize better.

+

pub fn wrapping_sub(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic
(convenience for .wrapping_offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .sub(count) instead when possible, because sub +allows the compiler to optimize better.

+

impl<T> Simd<[*const T; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl<T> Simd<[*const T; 4]> where
    [T; 4]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 4]>,
    value: Simd<[T; 4]>
) -> Simd<[T; 4]> where
    M: Mask,
    [M; 4]: SimdArray
[src]

Reads selected vector elements from memory.

+

Instantiates a new vector by reading the values from self for +those lanes whose mask is true, and using the elements of +value otherwise.

+

No memory is accessed for those lanes of self whose mask is +false.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The +pointers must be aligned to mem::align_of::<T>().

+

impl<T> Simd<[*mut T; 4]>[src]

pub const fn new(x0: *mut T, x1: *mut T, x2: *mut T, x3: *mut T) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: *mut T) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to +null.

+

pub fn is_null(self) -> msizex4[src]

Returns a mask that selects those lanes that contain null +pointers.

+

pub fn extract(self, index: usize) -> *mut T[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> *mut T[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl<T> Simd<[*mut T; 4]>[src]

pub fn eq(self, other: Self) -> msizex4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex4[src]

Lane-wise greater-than-or-equals comparison.

+

impl<T> Simd<[*mut T; 4]>[src]

pub fn from_slice_aligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> Simd<[*mut T; 4]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> Simd<[*mut T; 4]>[src]

pub unsafe fn offset(self, count: isizex4) -> Self[src]

Calculates the offset from a pointer.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is
Undefined Behavior:

  • Both the starting and resulting pointer must be either in
    bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around"
    the address space. That is, the infinite-precision sum, in
    bytes, must fit in a usize.

The compiler and standard library generally try to ensure
allocations never reach a size where an offset is a concern. For
instance, Vec and Box ensure they never allocate more than
isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize)
is always safe.

+

Most platforms fundamentally can't even construct such an
allocation. For instance, no known 64-bit platform can ever
serve a request for 2^63 bytes due to page-table limitations or
splitting the address space. However, some 32-bit and 16-bit
platforms may successfully serve a request for more than
isize::MAX bytes with things like Physical Address Extension.
As such, memory acquired directly from allocators or memory
mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset(self, count: isizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .offset(count) instead when possible, because +offset allows the compiler to optimize better.

+

pub unsafe fn offset_from(self, origin: Self) -> isizex4[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

This function is the inverse of offset.

+

Safety

+

If any of the following conditions are violated, the result is
Undefined Behavior:

  • Both the starting and other pointer must be either in bounds
    or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow
    an isize.

  • The distance between the pointers, in bytes, must be an exact
    multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around"
    the address space.

The compiler and standard library generally try to ensure +allocations never reach a size where an offset is a concern. For +instance, Vec and Box ensure they never allocate more than +isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) +is always safe.

+

Most platforms fundamentally can't even construct such an
allocation. For instance, no known 64-bit platform can ever
serve a request for 2^63 bytes due to page-table limitations or
splitting the address space. However, some 32-bit and 16-bit
platforms may successfully serve a request for more than
isize::MAX bytes with things like Physical Address Extension.
As such, memory acquired directly from allocators or memory
mapped files may be too large to handle with this function.

+

Consider using wrapping_offset_from instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset_from(self, origin: Self) -> isizex4[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

If the address difference between the two pointers is not a
multiple of mem::size_of::<T>() then the result of the
division is rounded towards zero.

+

Though this method is safe for any two pointers, note that its +result will be mostly useless if the two pointers aren't into +the same allocated object, for example if they point to two +different local variables.

+

pub unsafe fn add(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is
Undefined Behavior:

  • Both the starting and resulting pointer must be either in
    bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around"
    the address space. That is, the infinite-precision sum must
    fit in a usize.

The compiler and standard library generally try to ensure
allocations never reach a size where an offset is a concern. For
instance, Vec and Box ensure they never allocate more than
isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always
safe.

+

Most platforms fundamentally can't even construct such an
allocation. For instance, no known 64-bit platform can ever
serve a request for 2^63 bytes due to page-table limitations or
splitting the address space. However, some 32-bit and 16-bit
platforms may successfully serve a request for more than
isize::MAX bytes with things like Physical Address Extension.
As such, memory acquired directly from allocators or memory
mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub unsafe fn sub(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is
Undefined Behavior:

  • Both the starting and resulting pointer must be either in
    bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around"
    the address space. That is, the infinite-precision sum must
    fit in a usize.

The compiler and standard library generally try to ensure
allocations never reach a size where an offset is a concern. For
instance, Vec and Box ensure they never allocate more than
isize::MAX bytes, so
vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an
allocation. For instance, no known 64-bit platform can ever
serve a request for 2^63 bytes due to page-table limitations or
splitting the address space. However, some 32-bit and 16-bit
platforms may successfully serve a request for more than
isize::MAX bytes with things like Physical Address Extension.
As such, memory acquired directly from allocators or memory
mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_add(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic. +(convenience for .wrapping_offset(count as isize))

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .add(count) instead when possible, because add +allows the compiler to optimize better.

+

pub fn wrapping_sub(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic
(convenience for .wrapping_offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .sub(count) instead when possible, because sub +allows the compiler to optimize better.

+

impl<T> Simd<[*mut T; 4]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl<T> Simd<[*mut T; 4]> where
    [T; 4]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 4]>,
    value: Simd<[T; 4]>
) -> Simd<[T; 4]> where
    M: Mask,
    [M; 4]: SimdArray
[src]

Reads selected vector elements from memory.

+

Instantiates a new vector by reading the values from self for those lanes whose mask is true, and using the elements of value otherwise.

+

No memory is accessed for those lanes of self whose mask is false.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The pointers must be aligned to mem::align_of::<T>().

+

impl<T> Simd<[*mut T; 4]> where
    [T; 4]: SimdArray
[src]

pub unsafe fn write<M>(self, mask: Simd<[M; 4]>, value: Simd<[T; 4]>) where
    M: Mask,
    [M; 4]: SimdArray
[src]

Writes selected vector elements to memory.

+

Writes the lanes of value for which the mask is true to their corresponding memory addresses in self.

+

No memory is accessed for those lanes of self whose mask is false.

+

Overlapping memory addresses of self are written to in order from the least-significant to the most-significant element.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The pointers must be aligned to mem::align_of::<T>().

+
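
As a concrete illustration of the masked write, here is a hedged sketch (assuming packed_simd's msizex4 mask type alongside the API above) that scatters two of four lanes and leaves the other lanes' memory untouched.

    use packed_simd::{i32x4, msizex4, Simd};

    fn main() {
        let mut buf = [0i32; 4];
        let base = buf.as_mut_ptr();
        let ptrs = Simd::<[*mut i32; 4]>::new(
            base,
            unsafe { base.add(1) },
            unsafe { base.add(2) },
            unsafe { base.add(3) },
        );
        // Only lanes 0 and 2 are written; lanes 1 and 3 are never accessed.
        let mask = msizex4::new(true, false, true, false);
        unsafe { ptrs.write(mask, i32x4::new(10, 20, 30, 40)) };
        assert_eq!(buf, [10, 0, 30, 0]);
    }

+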

impl<T> Simd<[*const T; 8]>[src]

pub const fn new(
    x0: *const T,
    x1: *const T,
    x2: *const T,
    x3: *const T,
    x4: *const T,
    x5: *const T,
    x6: *const T,
    x7: *const T
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: *const T) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to null.

+

pub fn is_null(self) -> msizex8[src]

Returns a mask that selects those lanes that contain null pointers.

+
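
A short sketch of the constructors and the null test above; the all/none mask reductions are assumed from packed_simd's mask API, as they are not documented in this section.

    use packed_simd::Simd;

    fn main() {
        let x = 42i32;
        let all_x = Simd::<[*const i32; 8]>::splat(&x as *const i32);
        let nulls = Simd::<[*const i32; 8]>::null();
        assert!(all_x.is_null().none()); // no lane is null
        assert!(nulls.is_null().all()); // every lane is null
    }

+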

pub fn extract(self, index: usize) -> *const T[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> *const T[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl<T> Simd<[*const T; 8]>[src]

pub fn eq(self, other: Self) -> msizex8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex8[src]

Lane-wise greater-than-or-equals comparison.

+
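
Each comparison returns an msizex8 mask with one boolean per lane. A minimal sketch, using the add method documented further below to build the second operand:

    use packed_simd::{usizex8, Simd};

    fn main() {
        let buf = [0u8; 8];
        let a = Simd::<[*const u8; 8]>::splat(buf.as_ptr());
        // Each lane of `b` points one element past the matching lane of `a`.
        let b = unsafe { a.add(usizex8::splat(1)) };
        assert!(a.eq(a).all()); // every lane equals itself
        assert!(a.lt(b).all()); // every lane of `a` is below `b`
        assert!(b.ge(a).all());
    }

+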

impl<T> Simd<[*const T; 8]>[src]

pub fn from_slice_aligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> Simd<[*const T; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
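
A round-trip through the unaligned variants looks like the following sketch; the aligned variants additionally require the slice start to sit on an align_of::<Self>() boundary.

    use packed_simd::Simd;

    fn main() {
        let x = 7i32;
        let src = [&x as *const i32; 8];
        let v = Simd::<[*const i32; 8]>::from_slice_unaligned(&src);
        let mut dst = [core::ptr::null::<i32>(); 8];
        v.write_to_slice_unaligned(&mut dst);
        assert_eq!(src, dst);
    }

+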

impl<T> Simd<[*const T; 8]>[src]

pub unsafe fn offset(self, count: isizex8) -> Self[src]

Calculates the offset from a pointer.

+

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum, in bytes, must fit in a usize.

+

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory-mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

+
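
Since count is signed here, different lanes can step in different directions. A hedged sketch, with every resulting lane kept inside one buffer as the conditions above require:

    use packed_simd::{isizex8, Simd};

    fn main() {
        let buf = [0u8; 16];
        let mid = unsafe { buf.as_ptr().add(8) };
        let ptrs = Simd::<[*const u8; 8]>::splat(mid);
        // Lanes fan out around `mid`; all results stay inside `buf`.
        let fanned = unsafe { ptrs.offset(isizex8::new(-4, -3, -2, -1, 1, 2, 3, 4)) };
        assert_eq!(fanned.extract(0), unsafe { buf.as_ptr().add(4) });
        assert_eq!(fanned.extract(7), unsafe { buf.as_ptr().add(12) });
    }

+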

pub fn wrapping_offset(self, count: isizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

+

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

+

Always use .offset(count) instead when possible, because offset allows the compiler to optimize better.

+

pub unsafe fn offset_from(self, origin: Self) -> isizex8[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is divided by mem::size_of::<T>().

+

This function is the inverse of offset.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and other pointer must be either in bounds or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow an isize.

  • The distance between the pointers, in bytes, must be an exact multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around" the address space.

+

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory-mapped files may be too large to handle with this function.

+

Consider using wrapping_offset_from instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset_from(self, origin: Self) -> isizex8[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is divided by mem::size_of::<T>().

+

If the address difference between the two pointers is not a multiple of mem::size_of::<T>() then the result of the division is rounded towards zero.

+

Though this method is safe for any two pointers, note that its result will be mostly useless if the two pointers aren't into the same allocated object, for example if they point to two different local variables.

+
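
For example, a sketch using this safe variant, which tolerates any pair of pointers:

    use packed_simd::{usizex8, Simd};

    fn main() {
        let buf = [0i32; 16];
        let base = Simd::<[*const i32; 8]>::splat(buf.as_ptr());
        let ahead = unsafe { base.add(usizex8::splat(3)) };
        // Every lane is 3 elements past `base`, so every lane of the
        // distance vector is 3 (in units of T = i32, not bytes).
        assert_eq!(ahead.wrapping_offset_from(base).extract(0), 3);
    }

+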

pub unsafe fn add(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer (convenience for .offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

+

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory-mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

+

pub unsafe fn sub(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer (convenience for .offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

+

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory-mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

+

pub fn wrapping_add(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

+

Always use .add(count) instead when possible, because add allows the compiler to optimize better.

+

pub fn wrapping_sub(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

+

Always use .sub(count) instead when possible, because sub allows the compiler to optimize better.

+
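
The practical difference from add/sub is that the wrapping forms have no bounds precondition; only dereferencing an out-of-bounds lane would be Undefined Behavior. A sketch:

    use packed_simd::{usizex8, Simd};

    fn main() {
        let buf = [0u8; 4];
        let ptrs = Simd::<[*const u8; 8]>::splat(buf.as_ptr());
        // Far out of bounds, yet fine to *compute* with wrapping arithmetic:
        let far = ptrs.wrapping_add(usizex8::splat(1_000_000));
        let back = far.wrapping_sub(usizex8::splat(1_000_000));
        assert_eq!(back.extract(0), buf.as_ptr());
    }

+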

impl<T> Simd<[*const T; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl<T> Simd<[*const T; 8]> where
    [T; 8]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 8]>,
    value: Simd<[T; 8]>
) -> Simd<[T; 8]> where
    M: Mask,
    [M; 8]: SimdArray
[src]

Reads selected vector elements from memory.

+

Instantiates a new vector by reading the values from self for those lanes whose mask is true, and using the elements of value otherwise.

+

No memory is accessed for those lanes of self whose mask is false.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The pointers must be aligned to mem::align_of::<T>().

+
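
A hedged sketch of this gather: one lane is made null and masked off, so it takes its result from the fallback vector instead of touching memory. The mask negation operator is assumed from packed_simd's mask API; replace is documented earlier in this section.

    use packed_simd::{i32x8, Simd};

    fn main() {
        let x = 5i32;
        let mut ptrs = Simd::<[*const i32; 8]>::splat(&x as *const i32);
        ptrs = ptrs.replace(0, core::ptr::null());
        // Read only through the non-null lanes; lane 0 falls back to -1.
        let got = unsafe { ptrs.read(!ptrs.is_null(), i32x8::splat(-1)) };
        assert_eq!(got.extract(0), -1);
        assert_eq!(got.extract(1), 5);
    }

+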

impl<T> Simd<[*mut T; 8]>[src]

pub const fn new(
    x0: *mut T,
    x1: *mut T,
    x2: *mut T,
    x3: *mut T,
    x4: *mut T,
    x5: *mut T,
    x6: *mut T,
    x7: *mut T
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: *mut T) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to null.

+

pub fn is_null(self) -> msizex8[src]

Returns a mask that selects those lanes that contain null pointers.

+

pub fn extract(self, index: usize) -> *mut T[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> *mut T[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+
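
Note that replace is a value operation, which is what the #[must_use] annotations above warn about: it returns a new vector and leaves the original untouched. A sketch:

    use packed_simd::Simd;

    fn main() {
        let mut a = 1i32;
        let mut b = 2i32;
        let v = Simd::<[*mut i32; 8]>::splat(&mut a as *mut i32);
        let w = v.replace(3, &mut b as *mut i32);
        assert_eq!(v.extract(3), &mut a as *mut i32); // original unchanged
        assert_eq!(w.extract(3), &mut b as *mut i32); // only the copy differs
    }

+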

impl<T> Simd<[*mut T; 8]>[src]

pub fn eq(self, other: Self) -> msizex8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex8[src]

Lane-wise greater-than-or-equals comparison.

+

impl<T> Simd<[*mut T; 8]>[src]

pub fn from_slice_aligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> Simd<[*mut T; 8]>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> Simd<[*mut T; 8]>[src]

pub unsafe fn offset(self, count: isizex8) -> Self[src]

Calculates the offset from a pointer.

+

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum, in bytes, must fit in a usize.

+

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory-mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset(self, count: isizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

+

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

+

Always use .offset(count) instead when possible, because offset allows the compiler to optimize better.

+

pub unsafe fn offset_from(self, origin: Self) -> isizex8[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is divided by mem::size_of::<T>().

+

This function is the inverse of offset.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and other pointer must be either in bounds or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow an isize.

  • The distance between the pointers, in bytes, must be an exact multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around" the address space.

+

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory-mapped files may be too large to handle with this function.

+

Consider using wrapping_offset_from instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset_from(self, origin: Self) -> isizex8[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is divided by mem::size_of::<T>().

+

If the address difference between the two pointers is not a multiple of mem::size_of::<T>() then the result of the division is rounded towards zero.

+

Though this method is safe for any two pointers, note that its result will be mostly useless if the two pointers aren't into the same allocated object, for example if they point to two different local variables.

+

pub unsafe fn add(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer (convenience for .offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

+

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory-mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

+

pub unsafe fn sub(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer (convenience for .offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

+

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory-mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints are difficult to satisfy. The only advantage of this method is that it enables more aggressive compiler optimizations.

+

pub fn wrapping_add(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

+

Always use .add(count) instead when possible, because add allows the compiler to optimize better.

+

pub fn wrapping_sub(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is potentially hazardous to dereference (which requires unsafe).

+

Always use .sub(count) instead when possible, because sub allows the compiler to optimize better.

+

impl<T> Simd<[*mut T; 8]>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl<T> Simd<[*mut T; 8]> where
    [T; 8]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 8]>,
    value: Simd<[T; 8]>
) -> Simd<[T; 8]> where
    M: Mask,
    [M; 8]: SimdArray
[src]

Reads selected vector elements from memory.

+

Instantiates a new vector by reading the values from self for those lanes whose mask is true, and using the elements of value otherwise.

+

No memory is accessed for those lanes of self whose mask is false.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The pointers must be aligned to mem::align_of::<T>().

+

impl<T> Simd<[*mut T; 8]> where
    [T; 8]: SimdArray
[src]

pub unsafe fn write<M>(self, mask: Simd<[M; 8]>, value: Simd<[T; 8]>) where
    M: Mask,
    [M; 8]: SimdArray
[src]

Writes selected vector elements to memory.

+

Writes the lanes of value for which the mask is true to their corresponding memory addresses in self.

+

No memory is accessed for those lanes of self whose mask is false.

+

Overlapping memory addresses of self are written to in order from the least-significant to the most-significant element.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The pointers must be aligned to mem::align_of::<T>().

+

Trait Implementations

impl Add<Simd<[f32; 16]>> for f32x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[f32; 16]>> for f32[src]

type Output = f32x16

The resulting type after applying the + operator.

+

impl Add<Simd<[f32; 2]>> for f32x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[f32; 2]>> for f32[src]

type Output = f32x2

The resulting type after applying the + operator.

+

impl Add<Simd<[f32; 4]>> for f32x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[f32; 4]>> for f32[src]

type Output = f32x4

The resulting type after applying the + operator.

+
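
The scalar impls in this list let a plain scalar appear on the left-hand side of the operator; it is broadcast across every lane. For instance, a sketch using the f32x4 impls just above:

    use packed_simd::f32x4;

    fn main() {
        let v = f32x4::new(1.0, 2.0, 3.0, 4.0);
        let w = 10.0f32 + v; // impl Add<Simd<[f32; 4]>> for f32
        assert_eq!(w, f32x4::new(11.0, 12.0, 13.0, 14.0));
    }

+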

impl Add<Simd<[f32; 8]>> for f32x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[f32; 8]>> for f32[src]

type Output = f32x8

The resulting type after applying the + operator.

+

impl Add<Simd<[f64; 2]>> for f64x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[f64; 2]>> for f64[src]

type Output = f64x2

The resulting type after applying the + operator.

+

impl Add<Simd<[f64; 4]>> for f64x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[f64; 4]>> for f64[src]

type Output = f64x4

The resulting type after applying the + operator.

+

impl Add<Simd<[f64; 8]>> for f64x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[f64; 8]>> for f64[src]

type Output = f64x8

The resulting type after applying the + operator.

+

impl Add<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i128; 1]>> for i128[src]

type Output = i128x1

The resulting type after applying the + operator.

+

impl Add<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i128; 2]>> for i128[src]

type Output = i128x2

The resulting type after applying the + operator.

+

impl Add<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i128; 4]>> for i128[src]

type Output = i128x4

The resulting type after applying the + operator.

+

impl Add<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i16; 16]>> for i16[src]

type Output = i16x16

The resulting type after applying the + operator.

+

impl Add<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i16; 2]>> for i16[src]

type Output = i16x2

The resulting type after applying the + operator.

+

impl Add<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i16; 32]>> for i16[src]

type Output = i16x32

The resulting type after applying the + operator.

+

impl Add<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i16; 4]>> for i16[src]

type Output = i16x4

The resulting type after applying the + operator.

+

impl Add<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i16; 8]>> for i16[src]

type Output = i16x8

The resulting type after applying the + operator.

+

impl Add<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i32; 16]>> for i32[src]

type Output = i32x16

The resulting type after applying the + operator.

+

impl Add<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i32; 2]>> for i32[src]

type Output = i32x2

The resulting type after applying the + operator.

+

impl Add<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i32; 4]>> for i32[src]

type Output = i32x4

The resulting type after applying the + operator.

+

impl Add<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i32; 8]>> for i32[src]

type Output = i32x8

The resulting type after applying the + operator.

+

impl Add<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i64; 2]>> for i64[src]

type Output = i64x2

The resulting type after applying the + operator.

+

impl Add<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i64; 4]>> for i64[src]

type Output = i64x4

The resulting type after applying the + operator.

+

impl Add<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i64; 8]>> for i64[src]

type Output = i64x8

The resulting type after applying the + operator.

+

impl Add<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i8; 16]>> for i8[src]

type Output = i8x16

The resulting type after applying the + operator.

+

impl Add<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i8; 2]>> for i8[src]

type Output = i8x2

The resulting type after applying the + operator.

+

impl Add<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i8; 32]>> for i8[src]

type Output = i8x32

The resulting type after applying the + operator.

+

impl Add<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i8; 4]>> for i8[src]

type Output = i8x4

The resulting type after applying the + operator.

+

impl Add<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i8; 64]>> for i8[src]

type Output = i8x64

The resulting type after applying the + operator.

+

impl Add<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[i8; 8]>> for i8[src]

type Output = i8x8

The resulting type after applying the + operator.

+

impl Add<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[isize; 2]>> for isize[src]

type Output = isizex2

The resulting type after applying the + operator.

+

impl Add<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[isize; 4]>> for isize[src]

type Output = isizex4

The resulting type after applying the + operator.

+

impl Add<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[isize; 8]>> for isize[src]

type Output = isizex8

The resulting type after applying the + operator.

+

impl Add<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u128; 1]>> for u128[src]

type Output = u128x1

The resulting type after applying the + operator.

+

impl Add<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u128; 2]>> for u128[src]

type Output = u128x2

The resulting type after applying the + operator.

+

impl Add<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u128; 4]>> for u128[src]

type Output = u128x4

The resulting type after applying the + operator.

+

impl Add<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u16; 16]>> for u16[src]

type Output = u16x16

The resulting type after applying the + operator.

+

impl Add<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u16; 2]>> for u16[src]

type Output = u16x2

The resulting type after applying the + operator.

+

impl Add<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u16; 32]>> for u16[src]

type Output = u16x32

The resulting type after applying the + operator.

+

impl Add<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u16; 4]>> for u16[src]

type Output = u16x4

The resulting type after applying the + operator.

+

impl Add<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u16; 8]>> for u16[src]

type Output = u16x8

The resulting type after applying the + operator.

+

impl Add<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u32; 16]>> for u32[src]

type Output = u32x16

The resulting type after applying the + operator.

+

impl Add<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u32; 2]>> for u32[src]

type Output = u32x2

The resulting type after applying the + operator.

+

impl Add<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u32; 4]>> for u32[src]

type Output = u32x4

The resulting type after applying the + operator.

+

impl Add<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u32; 8]>> for u32[src]

type Output = u32x8

The resulting type after applying the + operator.

+

impl Add<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u64; 2]>> for u64[src]

type Output = u64x2

The resulting type after applying the + operator.

+

impl Add<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u64; 4]>> for u64[src]

type Output = u64x4

The resulting type after applying the + operator.

+

impl Add<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u64; 8]>> for u64[src]

type Output = u64x8

The resulting type after applying the + operator.

+

impl Add<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u8; 16]>> for u8[src]

type Output = u8x16

The resulting type after applying the + operator.

+

impl Add<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u8; 2]>> for u8[src]

type Output = u8x2

The resulting type after applying the + operator.

+

impl Add<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u8; 32]>> for u8[src]

type Output = u8x32

The resulting type after applying the + operator.

+

impl Add<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u8; 4]>> for u8[src]

type Output = u8x4

The resulting type after applying the + operator.

+

impl Add<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u8; 64]>> for u8[src]

type Output = u8x64

The resulting type after applying the + operator.

+

impl Add<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[u8; 8]>> for u8[src]

type Output = u8x8

The resulting type after applying the + operator.

+

impl Add<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[usize; 2]>> for usize[src]

type Output = usizex2

The resulting type after applying the + operator.

+

impl Add<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[usize; 4]>> for usize[src]

type Output = usizex4

The resulting type after applying the + operator.

+

impl Add<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<Simd<[usize; 8]>> for usize[src]

type Output = usizex8

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[f32; 16]>> for f32x16[src]

impl AddAssign<Simd<[f32; 2]>> for f32x2[src]

impl AddAssign<Simd<[f32; 4]>> for f32x4[src]

impl AddAssign<Simd<[f32; 8]>> for f32x8[src]

impl AddAssign<Simd<[f64; 2]>> for f64x2[src]

impl AddAssign<Simd<[f64; 4]>> for f64x4[src]

impl AddAssign<Simd<[f64; 8]>> for f64x8[src]

impl AddAssign<Simd<[i128; 1]>> for i128x1[src]

impl AddAssign<Simd<[i128; 2]>> for i128x2[src]

impl AddAssign<Simd<[i128; 4]>> for i128x4[src]

impl AddAssign<Simd<[i16; 16]>> for i16x16[src]

impl AddAssign<Simd<[i16; 2]>> for i16x2[src]

impl AddAssign<Simd<[i16; 32]>> for i16x32[src]

impl AddAssign<Simd<[i16; 4]>> for i16x4[src]

impl AddAssign<Simd<[i16; 8]>> for i16x8[src]

impl AddAssign<Simd<[i32; 16]>> for i32x16[src]

impl AddAssign<Simd<[i32; 2]>> for i32x2[src]

impl AddAssign<Simd<[i32; 4]>> for i32x4[src]

impl AddAssign<Simd<[i32; 8]>> for i32x8[src]

impl AddAssign<Simd<[i64; 2]>> for i64x2[src]

impl AddAssign<Simd<[i64; 4]>> for i64x4[src]

impl AddAssign<Simd<[i64; 8]>> for i64x8[src]

impl AddAssign<Simd<[i8; 16]>> for i8x16[src]

impl AddAssign<Simd<[i8; 2]>> for i8x2[src]

impl AddAssign<Simd<[i8; 32]>> for i8x32[src]

impl AddAssign<Simd<[i8; 4]>> for i8x4[src]

impl AddAssign<Simd<[i8; 64]>> for i8x64[src]

impl AddAssign<Simd<[i8; 8]>> for i8x8[src]

impl AddAssign<Simd<[isize; 2]>> for isizex2[src]

impl AddAssign<Simd<[isize; 4]>> for isizex4[src]

impl AddAssign<Simd<[isize; 8]>> for isizex8[src]

impl AddAssign<Simd<[u128; 1]>> for u128x1[src]

impl AddAssign<Simd<[u128; 2]>> for u128x2[src]

impl AddAssign<Simd<[u128; 4]>> for u128x4[src]

impl AddAssign<Simd<[u16; 16]>> for u16x16[src]

impl AddAssign<Simd<[u16; 2]>> for u16x2[src]

impl AddAssign<Simd<[u16; 32]>> for u16x32[src]

impl AddAssign<Simd<[u16; 4]>> for u16x4[src]

impl AddAssign<Simd<[u16; 8]>> for u16x8[src]

impl AddAssign<Simd<[u32; 16]>> for u32x16[src]

impl AddAssign<Simd<[u32; 2]>> for u32x2[src]

impl AddAssign<Simd<[u32; 4]>> for u32x4[src]

impl AddAssign<Simd<[u32; 8]>> for u32x8[src]

impl AddAssign<Simd<[u64; 2]>> for u64x2[src]

impl AddAssign<Simd<[u64; 4]>> for u64x4[src]

impl AddAssign<Simd<[u64; 8]>> for u64x8[src]

impl AddAssign<Simd<[u8; 16]>> for u8x16[src]

impl AddAssign<Simd<[u8; 2]>> for u8x2[src]

impl AddAssign<Simd<[u8; 32]>> for u8x32[src]

impl AddAssign<Simd<[u8; 4]>> for u8x4[src]

impl AddAssign<Simd<[u8; 64]>> for u8x64[src]

impl AddAssign<Simd<[u8; 8]>> for u8x8[src]

impl AddAssign<Simd<[usize; 2]>> for usizex2[src]

impl AddAssign<Simd<[usize; 4]>> for usizex4[src]

impl AddAssign<Simd<[usize; 8]>> for usizex8[src]

impl BitAnd<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i128; 1]>> for i128[src]

type Output = i128x1

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i128; 2]>> for i128[src]

type Output = i128x2

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i128; 4]>> for i128[src]

type Output = i128x4

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i16; 16]>> for i16[src]

type Output = i16x16

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i16; 2]>> for i16[src]

type Output = i16x2

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i16; 32]>> for i16[src]

type Output = i16x32

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i16; 4]>> for i16[src]

type Output = i16x4

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i16; 8]>> for i16[src]

type Output = i16x8

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i32; 16]>> for i32[src]

type Output = i32x16

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i32; 2]>> for i32[src]

type Output = i32x2

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i32; 4]>> for i32[src]

type Output = i32x4

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i32; 8]>> for i32[src]

type Output = i32x8

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i64; 2]>> for i64[src]

type Output = i64x2

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i64; 4]>> for i64[src]

type Output = i64x4

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i64; 8]>> for i64[src]

type Output = i64x8

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i8; 16]>> for i8[src]

type Output = i8x16

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i8; 2]>> for i8[src]

type Output = i8x2

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i8; 32]>> for i8[src]

type Output = i8x32

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i8; 4]>> for i8[src]

type Output = i8x4

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i8; 64]>> for i8[src]

type Output = i8x64

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[i8; 8]>> for i8[src]

type Output = i8x8

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[isize; 2]>> for isize[src]

type Output = isizex2

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[isize; 4]>> for isize[src]

type Output = isizex4

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[isize; 8]>> for isize[src]

type Output = isizex8

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m128; 1]>> for m128x1[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m128; 1]>> for bool[src]

type Output = m128x1

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m128; 2]>> for m128x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m128; 2]>> for bool[src]

type Output = m128x2

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m128; 4]>> for m128x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m128; 4]>> for bool[src]

type Output = m128x4

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m16; 16]>> for m16x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m16; 16]>> for bool[src]

type Output = m16x16

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m16; 2]>> for m16x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m16; 2]>> for bool[src]

type Output = m16x2

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m16; 32]>> for m16x32[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m16; 32]>> for bool[src]

type Output = m16x32

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m16; 4]>> for m16x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m16; 4]>> for bool[src]

type Output = m16x4

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m16; 8]>> for m16x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m16; 8]>> for bool[src]

type Output = m16x8

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m32; 16]>> for m32x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m32; 16]>> for bool[src]

type Output = m32x16

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m32; 2]>> for m32x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<Simd<[m32; 2]>> for bool[src]

type Output = m32x2

The resulting type after applying the & operator.

The remaining BitAnd impls follow the same two-form pattern: vector & vector (impl BitAnd<Simd<[T; N]>> for TxN, with type Output = Self) and scalar & vector (impl BitAnd<Simd<[T; N]>> for T, with type Output = TxN; for mask vectors the scalar operand is bool).

Mask vectors: m32x4, m32x8, m64x2, m64x4, m64x8, m8x2, m8x4, m8x8, m8x16, m8x32, m8x64, msizex2, msizex4, msizex8.

Unsigned-integer vectors: u128x1, u128x2, u128x4, u16x2, u16x4, u16x8, u16x16, u16x32, u32x2, u32x4, u32x8, u32x16, u64x2, u64x4, u64x8, u8x2, u8x4, u8x8, u8x16, u8x32, u8x64, usizex2, usizex4, usizex8.
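As a usage sketch of the two forms above (this assumes the packed_simd crate these pages appear to document, on nightly Rust; the lane values are illustrative):

use packed_simd::{m8x4, u32x4};

fn main() {
    // vector & vector: type Output = Self
    let a = u32x4::new(0b1100, 0b1010, 0xFF, 1);
    let b = u32x4::splat(0b1010);
    assert_eq!(a & b, u32x4::new(0b1000, 0b1010, 0b1010, 0));

    // scalar & vector: u32 & u32x4 -> u32x4 (the scalar is broadcast)
    assert_eq!(0b0110u32 & b, u32x4::splat(0b0010));

    // bool & mask vector: bool & m8x4 -> m8x4
    let m = m8x4::new(true, false, true, false);
    assert_eq!(true & m, m);
}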

BitAndAssign (the &= operator, in-place lane-wise AND) takes the same vector type on both sides, i.e. impl BitAndAssign<Simd<[T; N]>> for TxN, for every vector type:

Signed-integer vectors: i128x1, i128x2, i128x4, i16x2, i16x4, i16x8, i16x16, i16x32, i32x2, i32x4, i32x8, i32x16, i64x2, i64x4, i64x8, i8x2, i8x4, i8x8, i8x16, i8x32, i8x64, isizex2, isizex4, isizex8.

Mask vectors: m128x1, m128x2, m128x4, m16x2, m16x4, m16x8, m16x16, m16x32, m32x2, m32x4, m32x8, m32x16, m64x2, m64x4, m64x8, m8x2, m8x4, m8x8, m8x16, m8x32, m8x64, msizex2, msizex4, msizex8.

Unsigned-integer vectors: u128x1, u128x2, u128x4, u16x2, u16x4, u16x8, u16x16, u16x32, u32x2, u32x4, u32x8, u32x16, u64x2, u64x4, u64x8, u8x2, u8x4, u8x8, u8x16, u8x32, u8x64, usizex2, usizex4, usizex8.
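A minimal sketch of the compound form (same packed_simd assumption as above):

use packed_simd::u8x8;

fn main() {
    let mut v = u8x8::splat(0xFF);
    // in-place lane-wise AND; the right-hand side is the same vector type
    v &= u8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
    assert_eq!(v, u8x8::new(1, 2, 3, 4, 5, 6, 7, 8));
}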

BitOr follows the same two-form pattern: vector | vector (impl BitOr<Simd<[T; N]>> for TxN, with type Output = Self) and scalar | vector (impl BitOr<Simd<[T; N]>> for T, with type Output = TxN; for mask vectors the scalar operand is bool). In each impl, Output is the resulting type after applying the | operator. Both forms are provided for every signed-integer, mask, and unsigned-integer vector type in the BitAndAssign list above.
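A usage sketch mirroring the BitAnd example (same packed_simd assumption):

use packed_simd::{i16x8, m32x4};

fn main() {
    // vector | vector: type Output = Self
    let a = i16x8::new(0, 1, 2, 3, 4, 5, 6, 7);
    assert_eq!(a | i16x8::splat(8), i16x8::new(8, 9, 10, 11, 12, 13, 14, 15));

    // scalar | vector: i16 | i16x8 -> i16x8
    assert_eq!(1i16 | a, i16x8::new(1, 1, 3, 3, 5, 5, 7, 7));

    // bool | mask vector: a true scalar turns every lane on
    let m = m32x4::new(true, false, false, true);
    assert_eq!(true | m, m32x4::splat(true));
}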

BitOrAssign (the |= operator, in-place lane-wise OR) takes the same vector type on both sides, i.e. impl BitOrAssign<Simd<[T; N]>> for TxN, for every vector type in the same list.
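A minimal |= sketch (same assumption):

use packed_simd::u16x4;

fn main() {
    let mut flags = u16x4::new(0b001, 0b010, 0b000, 0b011);
    flags |= u16x4::splat(0b100); // set bit 2 in every lane, in place
    assert_eq!(flags, u16x4::new(0b101, 0b110, 0b100, 0b111));
}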

BitXor follows the same two-form pattern: vector ^ vector (impl BitXor<Simd<[T; N]>> for TxN, with type Output = Self) and scalar ^ vector (impl BitXor<Simd<[T; N]>> for T, with type Output = TxN; for mask vectors the scalar operand is bool). In each impl, Output is the resulting type after applying the ^ operator. Both forms are provided for every signed-integer, mask, and unsigned-integer vector type in the BitAndAssign list above.
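A usage sketch (same packed_simd assumption):

use packed_simd::{m64x2, u8x16};

fn main() {
    // vector ^ vector: XOR with itself zeroes every lane
    let v = u8x16::splat(0xAB);
    assert_eq!(v ^ v, u8x16::splat(0));

    // scalar ^ vector: u8 ^ u8x16 -> u8x16 (0xFF ^ x == !x)
    assert_eq!(0xFFu8 ^ v, u8x16::splat(0x54));

    // bool ^ mask vector: XOR with true negates each lane
    let m = m64x2::new(true, false);
    assert_eq!(true ^ m, m64x2::new(false, true));
}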

BitXorAssign (the ^= operator, in-place lane-wise XOR) takes the same vector type on both sides, i.e. impl BitXorAssign<Simd<[T; N]>> for TxN, for every vector type in the same list.
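A minimal ^= sketch (same assumption):

use packed_simd::i32x4;

fn main() {
    let mut v = i32x4::new(1, 2, 3, 4);
    v ^= i32x4::splat(-1); // all-ones mask flips every bit: x ^ -1 == !x
    assert_eq!(v, i32x4::new(-2, -3, -4, -5));
}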

impl<A: Clone + SimdArray> Clone for Simd<A>[src]

impl<A: Copy + SimdArray> Copy for Simd<A>[src]
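Because Simd<A> is Copy whenever its lane array is, vectors pass by value without moves or clones. A small sketch (same packed_simd assumption; sum() is its horizontal lane-reduction method):

use packed_simd::f32x4;

fn sum_lanes(v: f32x4) -> f32 {
    v.sum() // horizontal add of the four lanes
}

fn main() {
    let v = f32x4::new(1.0, 2.0, 3.0, 4.0);
    let s1 = sum_lanes(v);
    let s2 = sum_lanes(v); // v is still usable: it was copied, not moved
    assert_eq!(s1 + s2, 20.0);
}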

impl Div<Simd<[f32; 16]>> for f32x16[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[f32; 16]>> for f32[src]

type Output = f32x16

The resulting type after applying the / operator.

+

impl Div<Simd<[f32; 2]>> for f32x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[f32; 2]>> for f32[src]

type Output = f32x2

The resulting type after applying the / operator.

+

impl Div<Simd<[f32; 4]>> for f32x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[f32; 4]>> for f32[src]

type Output = f32x4

The resulting type after applying the / operator.

+

impl Div<Simd<[f32; 8]>> for f32x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[f32; 8]>> for f32[src]

type Output = f32x8

The resulting type after applying the / operator.

+

impl Div<Simd<[f64; 2]>> for f64x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[f64; 2]>> for f64[src]

type Output = f64x2

The resulting type after applying the / operator.

+

impl Div<Simd<[f64; 4]>> for f64x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[f64; 4]>> for f64[src]

type Output = f64x4

The resulting type after applying the / operator.

+

impl Div<Simd<[f64; 8]>> for f64x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[f64; 8]>> for f64[src]

type Output = f64x8

The resulting type after applying the / operator.

+

impl Div<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i128; 1]>> for i128[src]

type Output = i128x1

The resulting type after applying the / operator.

+

impl Div<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i128; 2]>> for i128[src]

type Output = i128x2

The resulting type after applying the / operator.

+

impl Div<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i128; 4]>> for i128[src]

type Output = i128x4

The resulting type after applying the / operator.

+

impl Div<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i16; 16]>> for i16[src]

type Output = i16x16

The resulting type after applying the / operator.

+

impl Div<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i16; 2]>> for i16[src]

type Output = i16x2

The resulting type after applying the / operator.

+

impl Div<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i16; 32]>> for i16[src]

type Output = i16x32

The resulting type after applying the / operator.

+

impl Div<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i16; 4]>> for i16[src]

type Output = i16x4

The resulting type after applying the / operator.

+

impl Div<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i16; 8]>> for i16[src]

type Output = i16x8

The resulting type after applying the / operator.

+

impl Div<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i32; 16]>> for i32[src]

type Output = i32x16

The resulting type after applying the / operator.

+

impl Div<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i32; 2]>> for i32[src]

type Output = i32x2

The resulting type after applying the / operator.

+

impl Div<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i32; 4]>> for i32[src]

type Output = i32x4

The resulting type after applying the / operator.

+

impl Div<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i32; 8]>> for i32[src]

type Output = i32x8

The resulting type after applying the / operator.

+

impl Div<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i64; 2]>> for i64[src]

type Output = i64x2

The resulting type after applying the / operator.

+

impl Div<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i64; 4]>> for i64[src]

type Output = i64x4

The resulting type after applying the / operator.

+

impl Div<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i64; 8]>> for i64[src]

type Output = i64x8

The resulting type after applying the / operator.

+

impl Div<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i8; 16]>> for i8[src]

type Output = i8x16

The resulting type after applying the / operator.

+

impl Div<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i8; 2]>> for i8[src]

type Output = i8x2

The resulting type after applying the / operator.

+

impl Div<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i8; 32]>> for i8[src]

type Output = i8x32

The resulting type after applying the / operator.

+

impl Div<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i8; 4]>> for i8[src]

type Output = i8x4

The resulting type after applying the / operator.

+

impl Div<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i8; 64]>> for i8[src]

type Output = i8x64

The resulting type after applying the / operator.

+

impl Div<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<Simd<[i8; 8]>> for i8[src]

type Output = i8x8

The resulting type after applying the / operator.

impl Div<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[isize; 2]>> for isize[src]

type Output = isizex2

The resulting type after applying the / operator.

impl Div<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[isize; 4]>> for isize[src]

type Output = isizex4

The resulting type after applying the / operator.

impl Div<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[isize; 8]>> for isize[src]

type Output = isizex8

The resulting type after applying the / operator.

impl Div<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u128; 1]>> for u128[src]

type Output = u128x1

The resulting type after applying the / operator.

impl Div<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u128; 2]>> for u128[src]

type Output = u128x2

The resulting type after applying the / operator.

impl Div<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u128; 4]>> for u128[src]

type Output = u128x4

The resulting type after applying the / operator.

impl Div<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u16; 16]>> for u16[src]

type Output = u16x16

The resulting type after applying the / operator.

impl Div<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u16; 2]>> for u16[src]

type Output = u16x2

The resulting type after applying the / operator.

impl Div<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u16; 32]>> for u16[src]

type Output = u16x32

The resulting type after applying the / operator.

impl Div<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u16; 4]>> for u16[src]

type Output = u16x4

The resulting type after applying the / operator.

impl Div<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u16; 8]>> for u16[src]

type Output = u16x8

The resulting type after applying the / operator.

impl Div<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u32; 16]>> for u32[src]

type Output = u32x16

The resulting type after applying the / operator.

impl Div<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u32; 2]>> for u32[src]

type Output = u32x2

The resulting type after applying the / operator.

impl Div<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u32; 4]>> for u32[src]

type Output = u32x4

The resulting type after applying the / operator.

impl Div<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u32; 8]>> for u32[src]

type Output = u32x8

The resulting type after applying the / operator.

impl Div<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u64; 2]>> for u64[src]

type Output = u64x2

The resulting type after applying the / operator.

impl Div<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u64; 4]>> for u64[src]

type Output = u64x4

The resulting type after applying the / operator.

impl Div<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u64; 8]>> for u64[src]

type Output = u64x8

The resulting type after applying the / operator.

impl Div<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u8; 16]>> for u8[src]

type Output = u8x16

The resulting type after applying the / operator.

impl Div<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u8; 2]>> for u8[src]

type Output = u8x2

The resulting type after applying the / operator.

impl Div<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u8; 32]>> for u8[src]

type Output = u8x32

The resulting type after applying the / operator.

impl Div<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u8; 4]>> for u8[src]

type Output = u8x4

The resulting type after applying the / operator.

impl Div<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u8; 64]>> for u8[src]

type Output = u8x64

The resulting type after applying the / operator.

impl Div<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[u8; 8]>> for u8[src]

type Output = u8x8

The resulting type after applying the / operator.

impl Div<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[usize; 2]>> for usize[src]

type Output = usizex2

The resulting type after applying the / operator.

impl Div<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[usize; 4]>> for usize[src]

type Output = usizex4

The resulting type after applying the / operator.

impl Div<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<Simd<[usize; 8]>> for usize[src]

type Output = usizex8

The resulting type after applying the / operator.
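
As a usage sketch of the Div impls above (assuming these are the packed_simd crate's Simd<[T; N]> vectors, whose aliases such as i32x4 are re-exported at the crate root), division comes in two shapes: vector / vector divides lane-wise, and scalar / vector splats the scalar across the lanes first:

use packed_simd::i32x4;

fn main() {
    let a = i32x4::new(8, 16, 24, 32);
    let b = i32x4::new(2, 4, 8, 16);

    // Vector / vector: lane-wise division, Output = Self.
    assert_eq!(a / b, i32x4::new(4, 4, 3, 2));

    // Scalar / vector, per impl Div<Simd<[i32; 4]>> for i32: the scalar
    // is splatted across the lanes, then divided lane-wise (Output = i32x4).
    assert_eq!(64i32 / b, i32x4::new(32, 16, 8, 4));
}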

impl DivAssign<Simd<[f32; 16]>> for f32x16[src]

impl DivAssign<Simd<[f32; 2]>> for f32x2[src]

impl DivAssign<Simd<[f32; 4]>> for f32x4[src]

impl DivAssign<Simd<[f32; 8]>> for f32x8[src]

impl DivAssign<Simd<[f64; 2]>> for f64x2[src]

impl DivAssign<Simd<[f64; 4]>> for f64x4[src]

impl DivAssign<Simd<[f64; 8]>> for f64x8[src]

impl DivAssign<Simd<[i128; 1]>> for i128x1[src]

impl DivAssign<Simd<[i128; 2]>> for i128x2[src]

impl DivAssign<Simd<[i128; 4]>> for i128x4[src]

impl DivAssign<Simd<[i16; 16]>> for i16x16[src]

impl DivAssign<Simd<[i16; 2]>> for i16x2[src]

impl DivAssign<Simd<[i16; 32]>> for i16x32[src]

impl DivAssign<Simd<[i16; 4]>> for i16x4[src]

impl DivAssign<Simd<[i16; 8]>> for i16x8[src]

impl DivAssign<Simd<[i32; 16]>> for i32x16[src]

impl DivAssign<Simd<[i32; 2]>> for i32x2[src]

impl DivAssign<Simd<[i32; 4]>> for i32x4[src]

impl DivAssign<Simd<[i32; 8]>> for i32x8[src]

impl DivAssign<Simd<[i64; 2]>> for i64x2[src]

impl DivAssign<Simd<[i64; 4]>> for i64x4[src]

impl DivAssign<Simd<[i64; 8]>> for i64x8[src]

impl DivAssign<Simd<[i8; 16]>> for i8x16[src]

impl DivAssign<Simd<[i8; 2]>> for i8x2[src]

impl DivAssign<Simd<[i8; 32]>> for i8x32[src]

impl DivAssign<Simd<[i8; 4]>> for i8x4[src]

impl DivAssign<Simd<[i8; 64]>> for i8x64[src]

impl DivAssign<Simd<[i8; 8]>> for i8x8[src]

impl DivAssign<Simd<[isize; 2]>> for isizex2[src]

impl DivAssign<Simd<[isize; 4]>> for isizex4[src]

impl DivAssign<Simd<[isize; 8]>> for isizex8[src]

impl DivAssign<Simd<[u128; 1]>> for u128x1[src]

impl DivAssign<Simd<[u128; 2]>> for u128x2[src]

impl DivAssign<Simd<[u128; 4]>> for u128x4[src]

impl DivAssign<Simd<[u16; 16]>> for u16x16[src]

impl DivAssign<Simd<[u16; 2]>> for u16x2[src]

impl DivAssign<Simd<[u16; 32]>> for u16x32[src]

impl DivAssign<Simd<[u16; 4]>> for u16x4[src]

impl DivAssign<Simd<[u16; 8]>> for u16x8[src]

impl DivAssign<Simd<[u32; 16]>> for u32x16[src]

impl DivAssign<Simd<[u32; 2]>> for u32x2[src]

impl DivAssign<Simd<[u32; 4]>> for u32x4[src]

impl DivAssign<Simd<[u32; 8]>> for u32x8[src]

impl DivAssign<Simd<[u64; 2]>> for u64x2[src]

impl DivAssign<Simd<[u64; 4]>> for u64x4[src]

impl DivAssign<Simd<[u64; 8]>> for u64x8[src]

impl DivAssign<Simd<[u8; 16]>> for u8x16[src]

impl DivAssign<Simd<[u8; 2]>> for u8x2[src]

impl DivAssign<Simd<[u8; 32]>> for u8x32[src]

impl DivAssign<Simd<[u8; 4]>> for u8x4[src]

impl DivAssign<Simd<[u8; 64]>> for u8x64[src]

impl DivAssign<Simd<[u8; 8]>> for u8x8[src]

impl DivAssign<Simd<[usize; 2]>> for usizex2[src]

impl DivAssign<Simd<[usize; 4]>> for usizex4[src]

impl DivAssign<Simd<[usize; 8]>> for usizex8[src]
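
The DivAssign impls above are the in-place counterpart. A minimal sketch (again assuming packed_simd): /= divides lane-wise, and since only vector right-hand sides are listed, a scalar divisor has to be splatted by hand:

use packed_simd::f32x4;

fn main() {
    let mut v = f32x4::new(2.0, 4.0, 8.0, 16.0);

    // Lane-wise in-place division; splat the scalar divisor manually,
    // because only DivAssign<Simd<...>> (vector RHS) impls are provided.
    v /= f32x4::splat(2.0);
    assert_eq!(v, f32x4::new(1.0, 2.0, 4.0, 8.0));
}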

impl From<Simd<[f32; 16]>> for [f32; 16][src]

impl From<Simd<[f32; 2]>> for [f32; 2][src]

impl From<Simd<[f32; 2]>> for f64x2[src]

impl From<Simd<[f32; 4]>> for [f32; 4][src]

impl From<Simd<[f32; 4]>> for f64x4[src]

impl From<Simd<[f32; 8]>> for [f32; 8][src]

impl From<Simd<[f32; 8]>> for f64x8[src]

impl From<Simd<[f64; 2]>> for [f64; 2][src]

impl From<Simd<[f64; 4]>> for [f64; 4][src]

impl From<Simd<[f64; 8]>> for [f64; 8][src]

impl From<Simd<[i128; 1]>> for [i128; 1][src]

impl From<Simd<[i128; 2]>> for [i128; 2][src]

impl From<Simd<[i128; 4]>> for [i128; 4][src]

impl From<Simd<[i16; 16]>> for [i16; 16][src]

impl From<Simd<[i16; 16]>> for i32x16[src]

impl From<Simd<[i16; 16]>> for f32x16[src]

impl From<Simd<[i16; 2]>> for [i16; 2][src]

impl From<Simd<[i16; 2]>> for i32x2[src]

impl From<Simd<[i16; 2]>> for f32x2[src]

impl From<Simd<[i16; 2]>> for i64x2[src]

impl From<Simd<[i16; 2]>> for f64x2[src]

impl From<Simd<[i16; 2]>> for i128x2[src]

impl From<Simd<[i16; 32]>> for [i16; 32][src]

impl From<Simd<[i16; 4]>> for [i16; 4][src]

impl From<Simd<[i16; 4]>> for i32x4[src]

impl From<Simd<[i16; 4]>> for f32x4[src]

impl From<Simd<[i16; 4]>> for i64x4[src]

impl From<Simd<[i16; 4]>> for f64x4[src]

impl From<Simd<[i16; 4]>> for i128x4[src]

impl From<Simd<[i16; 8]>> for [i16; 8][src]

impl From<Simd<[i16; 8]>> for i32x8[src]

impl From<Simd<[i16; 8]>> for f32x8[src]

impl From<Simd<[i16; 8]>> for i64x8[src]

impl From<Simd<[i16; 8]>> for f64x8[src]

impl From<Simd<[i32; 16]>> for [i32; 16][src]

impl From<Simd<[i32; 2]>> for [i32; 2][src]

impl From<Simd<[i32; 2]>> for i64x2[src]

impl From<Simd<[i32; 2]>> for f64x2[src]

impl From<Simd<[i32; 2]>> for i128x2[src]

impl From<Simd<[i32; 4]>> for [i32; 4][src]

impl From<Simd<[i32; 4]>> for i64x4[src]

impl From<Simd<[i32; 4]>> for f64x4[src]

impl From<Simd<[i32; 4]>> for i128x4[src]

impl From<Simd<[i32; 8]>> for [i32; 8][src]

impl From<Simd<[i32; 8]>> for i64x8[src]

impl From<Simd<[i32; 8]>> for f64x8[src]

impl From<Simd<[i64; 2]>> for [i64; 2][src]

impl From<Simd<[i64; 2]>> for i128x2[src]

impl From<Simd<[i64; 4]>> for [i64; 4][src]

impl From<Simd<[i64; 4]>> for i128x4[src]

impl From<Simd<[i64; 8]>> for [i64; 8][src]

impl From<Simd<[i8; 16]>> for [i8; 16][src]

impl From<Simd<[i8; 16]>> for i16x16[src]

impl From<Simd<[i8; 16]>> for i32x16[src]

impl From<Simd<[i8; 16]>> for f32x16[src]

impl From<Simd<[i8; 2]>> for [i8; 2][src]

impl From<Simd<[i8; 2]>> for i16x2[src]

impl From<Simd<[i8; 2]>> for i32x2[src]

impl From<Simd<[i8; 2]>> for f32x2[src]

impl From<Simd<[i8; 2]>> for i64x2[src]

impl From<Simd<[i8; 2]>> for f64x2[src]

impl From<Simd<[i8; 2]>> for i128x2[src]

impl From<Simd<[i8; 32]>> for [i8; 32][src]

impl From<Simd<[i8; 32]>> for i16x32[src]

impl From<Simd<[i8; 4]>> for [i8; 4][src]

impl From<Simd<[i8; 4]>> for i16x4[src]

impl From<Simd<[i8; 4]>> for i32x4[src]

impl From<Simd<[i8; 4]>> for f32x4[src]

impl From<Simd<[i8; 4]>> for i64x4[src]

impl From<Simd<[i8; 4]>> for f64x4[src]

impl From<Simd<[i8; 4]>> for i128x4[src]

impl From<Simd<[i8; 64]>> for [i8; 64][src]

impl From<Simd<[i8; 8]>> for [i8; 8][src]

impl From<Simd<[i8; 8]>> for i16x8[src]

impl From<Simd<[i8; 8]>> for i32x8[src]

impl From<Simd<[i8; 8]>> for f32x8[src]

impl From<Simd<[i8; 8]>> for i64x8[src]

impl From<Simd<[i8; 8]>> for f64x8[src]

impl From<Simd<[isize; 2]>> for [isize; 2][src]

impl From<Simd<[isize; 4]>> for [isize; 4][src]

impl From<Simd<[isize; 8]>> for [isize; 8][src]

impl From<Simd<[m128; 1]>> for [m128; 1][src]

impl From<Simd<[m128; 2]>> for m8x2[src]

impl From<Simd<[m128; 2]>> for m16x2[src]

impl From<Simd<[m128; 2]>> for m32x2[src]

impl From<Simd<[m128; 2]>> for m64x2[src]

impl From<Simd<[m128; 2]>> for [m128; 2][src]

impl From<Simd<[m128; 4]>> for [m128; 4][src]

impl From<Simd<[m16; 16]>> for m8x16[src]

impl From<Simd<[m16; 16]>> for [m16; 16][src]

impl From<Simd<[m16; 16]>> for m32x16[src]

impl From<Simd<[m16; 2]>> for m8x2[src]

impl From<Simd<[m16; 2]>> for [m16; 2][src]

impl From<Simd<[m16; 2]>> for m32x2[src]

impl From<Simd<[m16; 2]>> for m64x2[src]

impl From<Simd<[m16; 2]>> for m128x2[src]

impl From<Simd<[m16; 32]>> for [m16; 32][src]

impl From<Simd<[m16; 4]>> for m8x4[src]

impl From<Simd<[m16; 4]>> for [m16; 4][src]

impl From<Simd<[m16; 4]>> for m32x4[src]

impl From<Simd<[m16; 4]>> for m64x4[src]

impl From<Simd<[m16; 4]>> for m128x4[src]

impl From<Simd<[m16; 8]>> for m8x8[src]

impl From<Simd<[m16; 8]>> for [m16; 8][src]

impl From<Simd<[m16; 8]>> for m32x8[src]

impl From<Simd<[m16; 8]>> for m64x8[src]

impl From<Simd<[m32; 16]>> for [m32; 16][src]

impl From<Simd<[m32; 2]>> for m8x2[src]

impl From<Simd<[m32; 2]>> for m16x2[src]

impl From<Simd<[m32; 2]>> for [m32; 2][src]

impl From<Simd<[m32; 2]>> for m64x2[src]

impl From<Simd<[m32; 2]>> for m128x2[src]

impl From<Simd<[m32; 4]>> for m8x4[src]

impl From<Simd<[m32; 4]>> for m16x4[src]

impl From<Simd<[m32; 4]>> for [m32; 4][src]

impl From<Simd<[m32; 4]>> for m64x4[src]

impl From<Simd<[m32; 4]>> for m128x4[src]

impl From<Simd<[m32; 8]>> for m8x8[src]

impl From<Simd<[m32; 8]>> for m16x8[src]

impl From<Simd<[m32; 8]>> for [m32; 8][src]

impl From<Simd<[m32; 8]>> for m64x8[src]

impl From<Simd<[m64; 2]>> for m8x2[src]

impl From<Simd<[m64; 2]>> for m16x2[src]

impl From<Simd<[m64; 2]>> for m32x2[src]

impl From<Simd<[m64; 2]>> for [m64; 2][src]

impl From<Simd<[m64; 2]>> for m128x2[src]

impl From<Simd<[m64; 4]>> for m8x4[src]

impl From<Simd<[m64; 4]>> for m16x4[src]

impl From<Simd<[m64; 4]>> for m32x4[src]

impl From<Simd<[m64; 4]>> for [m64; 4][src]

impl From<Simd<[m64; 4]>> for m128x4[src]

impl From<Simd<[m64; 8]>> for [m64; 8][src]

impl From<Simd<[m8; 16]>> for [m8; 16][src]

impl From<Simd<[m8; 16]>> for m16x16[src]

impl From<Simd<[m8; 16]>> for m32x16[src]

impl From<Simd<[m8; 2]>> for [m8; 2][src]

impl From<Simd<[m8; 2]>> for m16x2[src]

impl From<Simd<[m8; 2]>> for m32x2[src]

impl From<Simd<[m8; 2]>> for m64x2[src]

impl From<Simd<[m8; 2]>> for m128x2[src]

impl From<Simd<[m8; 32]>> for [m8; 32][src]

impl From<Simd<[m8; 32]>> for m16x32[src]

impl From<Simd<[m8; 4]>> for [m8; 4][src]

impl From<Simd<[m8; 4]>> for m16x4[src]

impl From<Simd<[m8; 4]>> for m32x4[src]

impl From<Simd<[m8; 4]>> for m64x4[src]

impl From<Simd<[m8; 4]>> for m128x4[src]

impl From<Simd<[m8; 64]>> for [m8; 64][src]

impl From<Simd<[m8; 8]>> for [m8; 8][src]

impl From<Simd<[m8; 8]>> for m16x8[src]

impl From<Simd<[m8; 8]>> for m32x8[src]

impl From<Simd<[m8; 8]>> for m64x8[src]

impl From<Simd<[msize; 2]>> for [msize; 2][src]

impl From<Simd<[msize; 4]>> for [msize; 4][src]

impl From<Simd<[msize; 8]>> for [msize; 8][src]

impl From<Simd<[u128; 1]>> for [u128; 1][src]

impl From<Simd<[u128; 2]>> for [u128; 2][src]

impl From<Simd<[u128; 4]>> for [u128; 4][src]

impl From<Simd<[u16; 16]>> for [u16; 16][src]

impl From<Simd<[u16; 16]>> for i32x16[src]

impl From<Simd<[u16; 16]>> for u32x16[src]

impl From<Simd<[u16; 16]>> for f32x16[src]

impl From<Simd<[u16; 2]>> for [u16; 2][src]

impl From<Simd<[u16; 2]>> for i32x2[src]

impl From<Simd<[u16; 2]>> for u32x2[src]

impl From<Simd<[u16; 2]>> for f32x2[src]

impl From<Simd<[u16; 2]>> for i64x2[src]

impl From<Simd<[u16; 2]>> for u64x2[src]

impl From<Simd<[u16; 2]>> for f64x2[src]

impl From<Simd<[u16; 2]>> for i128x2[src]

impl From<Simd<[u16; 2]>> for u128x2[src]

impl From<Simd<[u16; 32]>> for [u16; 32][src]

impl From<Simd<[u16; 4]>> for [u16; 4][src]

impl From<Simd<[u16; 4]>> for i32x4[src]

impl From<Simd<[u16; 4]>> for u32x4[src]

impl From<Simd<[u16; 4]>> for f32x4[src]

impl From<Simd<[u16; 4]>> for i64x4[src]

impl From<Simd<[u16; 4]>> for u64x4[src]

impl From<Simd<[u16; 4]>> for f64x4[src]

impl From<Simd<[u16; 4]>> for i128x4[src]

impl From<Simd<[u16; 4]>> for u128x4[src]

impl From<Simd<[u16; 8]>> for [u16; 8][src]

impl From<Simd<[u16; 8]>> for i32x8[src]

impl From<Simd<[u16; 8]>> for u32x8[src]

impl From<Simd<[u16; 8]>> for f32x8[src]

impl From<Simd<[u16; 8]>> for i64x8[src]

impl From<Simd<[u16; 8]>> for u64x8[src]

impl From<Simd<[u16; 8]>> for f64x8[src]

impl From<Simd<[u32; 16]>> for [u32; 16][src]

impl From<Simd<[u32; 2]>> for [u32; 2][src]

impl From<Simd<[u32; 2]>> for i64x2[src]

impl From<Simd<[u32; 2]>> for u64x2[src]

impl From<Simd<[u32; 2]>> for f64x2[src]

impl From<Simd<[u32; 2]>> for i128x2[src]

impl From<Simd<[u32; 2]>> for u128x2[src]

impl From<Simd<[u32; 4]>> for [u32; 4][src]

impl From<Simd<[u32; 4]>> for i64x4[src]

impl From<Simd<[u32; 4]>> for u64x4[src]

impl From<Simd<[u32; 4]>> for f64x4[src]

impl From<Simd<[u32; 4]>> for i128x4[src]

impl From<Simd<[u32; 4]>> for u128x4[src]

impl From<Simd<[u32; 8]>> for [u32; 8][src]

impl From<Simd<[u32; 8]>> for i64x8[src]

impl From<Simd<[u32; 8]>> for u64x8[src]

impl From<Simd<[u32; 8]>> for f64x8[src]

impl From<Simd<[u64; 2]>> for [u64; 2][src]

impl From<Simd<[u64; 2]>> for i128x2[src]

impl From<Simd<[u64; 2]>> for u128x2[src]

impl From<Simd<[u64; 4]>> for [u64; 4][src]

impl From<Simd<[u64; 4]>> for i128x4[src]

impl From<Simd<[u64; 4]>> for u128x4[src]

impl From<Simd<[u64; 8]>> for [u64; 8][src]

impl From<Simd<[u8; 16]>> for [u8; 16][src]

impl From<Simd<[u8; 16]>> for i16x16[src]

impl From<Simd<[u8; 16]>> for u16x16[src]

impl From<Simd<[u8; 16]>> for i32x16[src]

impl From<Simd<[u8; 16]>> for u32x16[src]

impl From<Simd<[u8; 16]>> for f32x16[src]

impl From<Simd<[u8; 2]>> for [u8; 2][src]

impl From<Simd<[u8; 2]>> for i16x2[src]

impl From<Simd<[u8; 2]>> for u128x2[src]

impl From<Simd<[u8; 2]>> for u16x2[src]

impl From<Simd<[u8; 2]>> for i32x2[src]

impl From<Simd<[u8; 2]>> for u32x2[src]

impl From<Simd<[u8; 2]>> for f32x2[src]

impl From<Simd<[u8; 2]>> for i64x2[src]

impl From<Simd<[u8; 2]>> for u64x2[src]

impl From<Simd<[u8; 2]>> for f64x2[src]

impl From<Simd<[u8; 2]>> for i128x2[src]

impl From<Simd<[u8; 32]>> for [u8; 32][src]

impl From<Simd<[u8; 32]>> for i16x32[src]

impl From<Simd<[u8; 32]>> for u16x32[src]

impl From<Simd<[u8; 4]>> for [u8; 4][src]

impl From<Simd<[u8; 4]>> for i16x4[src]

impl From<Simd<[u8; 4]>> for u128x4[src]

impl From<Simd<[u8; 4]>> for u16x4[src]

impl From<Simd<[u8; 4]>> for i32x4[src]

impl From<Simd<[u8; 4]>> for u32x4[src]

impl From<Simd<[u8; 4]>> for f32x4[src]

impl From<Simd<[u8; 4]>> for i64x4[src]

impl From<Simd<[u8; 4]>> for u64x4[src]

impl From<Simd<[u8; 4]>> for f64x4[src]

impl From<Simd<[u8; 4]>> for i128x4[src]

impl From<Simd<[u8; 64]>> for [u8; 64][src]

impl From<Simd<[u8; 8]>> for [u8; 8][src]

impl From<Simd<[u8; 8]>> for i16x8[src]

impl From<Simd<[u8; 8]>> for u16x8[src]

impl From<Simd<[u8; 8]>> for i32x8[src]

impl From<Simd<[u8; 8]>> for u32x8[src]

impl From<Simd<[u8; 8]>> for f32x8[src]

impl From<Simd<[u8; 8]>> for i64x8[src]

impl From<Simd<[u8; 8]>> for u64x8[src]

impl From<Simd<[u8; 8]>> for f64x8[src]

impl From<Simd<[usize; 2]>> for [usize; 2][src]

impl From<Simd<[usize; 4]>> for [usize; 4][src]

impl From<Simd<[usize; 8]>> for [usize; 8][src]
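
The From impls above fall into two groups: conversions between vectors and plain arrays, and lossless lane-widening conversions such as i8 lanes into i16 lanes. A short sketch under the same packed_simd assumption:

use packed_simd::{i16x8, i8x8};

fn main() {
    let narrow = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);

    // Lossless lane widening, per impl From<Simd<[i8; 8]>> for i16x8.
    let wide = i16x8::from(narrow);
    assert_eq!(wide, i16x8::new(1, 2, 3, 4, 5, 6, 7, 8));

    // Vector -> array, per impl From<Simd<[i16; 8]>> for [i16; 8].
    let arr: [i16; 8] = wide.into();
    assert_eq!(arr, [1, 2, 3, 4, 5, 6, 7, 8]);
}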

impl FromBits<Simd<[f32; 16]>> for i8x64[src]

impl FromBits<Simd<[f32; 16]>> for u8x64[src]

impl FromBits<Simd<[f32; 16]>> for u128x4[src]

impl FromBits<Simd<[f32; 16]>> for i16x32[src]

impl FromBits<Simd<[f32; 16]>> for u16x32[src]

impl FromBits<Simd<[f32; 16]>> for i32x16[src]

impl FromBits<Simd<[f32; 16]>> for u32x16[src]

impl FromBits<Simd<[f32; 16]>> for i64x8[src]

impl FromBits<Simd<[f32; 16]>> for u64x8[src]

impl FromBits<Simd<[f32; 16]>> for f64x8[src]

impl FromBits<Simd<[f32; 16]>> for i128x4[src]

impl FromBits<Simd<[f32; 2]>> for i8x8[src]

impl FromBits<Simd<[f32; 2]>> for u8x8[src]

impl FromBits<Simd<[f32; 2]>> for i16x4[src]

impl FromBits<Simd<[f32; 2]>> for u16x4[src]

impl FromBits<Simd<[f32; 2]>> for i32x2[src]

impl FromBits<Simd<[f32; 2]>> for u32x2[src]

impl FromBits<Simd<[f32; 2]>> for __m64[src]

impl FromBits<Simd<[f32; 4]>> for i8x16[src]

impl FromBits<Simd<[f32; 4]>> for u8x16[src]

impl FromBits<Simd<[f32; 4]>> for u128x1[src]

impl FromBits<Simd<[f32; 4]>> for __m128[src]

impl FromBits<Simd<[f32; 4]>> for __m128i[src]

impl FromBits<Simd<[f32; 4]>> for __m128d[src]

impl FromBits<Simd<[f32; 4]>> for i16x8[src]

impl FromBits<Simd<[f32; 4]>> for u16x8[src]

impl FromBits<Simd<[f32; 4]>> for i32x4[src]

impl FromBits<Simd<[f32; 4]>> for u32x4[src]

impl FromBits<Simd<[f32; 4]>> for i64x2[src]

impl FromBits<Simd<[f32; 4]>> for u64x2[src]

impl FromBits<Simd<[f32; 4]>> for f64x2[src]

impl FromBits<Simd<[f32; 4]>> for i128x1[src]

impl FromBits<Simd<[f32; 8]>> for i8x32[src]

impl FromBits<Simd<[f32; 8]>> for u8x32[src]

impl FromBits<Simd<[f32; 8]>> for u128x2[src]

impl FromBits<Simd<[f32; 8]>> for __m256[src]

impl FromBits<Simd<[f32; 8]>> for __m256i[src]

impl FromBits<Simd<[f32; 8]>> for __m256d[src]

impl FromBits<Simd<[f32; 8]>> for i16x16[src]

impl FromBits<Simd<[f32; 8]>> for u16x16[src]

impl FromBits<Simd<[f32; 8]>> for i32x8[src]

impl FromBits<Simd<[f32; 8]>> for u32x8[src]

impl FromBits<Simd<[f32; 8]>> for i64x4[src]

impl FromBits<Simd<[f32; 8]>> for u64x4[src]

impl FromBits<Simd<[f32; 8]>> for f64x4[src]

impl FromBits<Simd<[f32; 8]>> for i128x2[src]

impl FromBits<Simd<[f64; 2]>> for i8x16[src]

impl FromBits<Simd<[f64; 2]>> for u8x16[src]

impl FromBits<Simd<[f64; 2]>> for u128x1[src]

impl FromBits<Simd<[f64; 2]>> for __m128[src]

impl FromBits<Simd<[f64; 2]>> for __m128i[src]

impl FromBits<Simd<[f64; 2]>> for __m128d[src]

impl FromBits<Simd<[f64; 2]>> for i16x8[src]

impl FromBits<Simd<[f64; 2]>> for u16x8[src]

impl FromBits<Simd<[f64; 2]>> for i32x4[src]

impl FromBits<Simd<[f64; 2]>> for u32x4[src]

impl FromBits<Simd<[f64; 2]>> for f32x4[src]

impl FromBits<Simd<[f64; 2]>> for i64x2[src]

impl FromBits<Simd<[f64; 2]>> for u64x2[src]

impl FromBits<Simd<[f64; 2]>> for i128x1[src]

impl FromBits<Simd<[f64; 4]>> for i8x32[src]

impl FromBits<Simd<[f64; 4]>> for u8x32[src]

impl FromBits<Simd<[f64; 4]>> for u128x2[src]

impl FromBits<Simd<[f64; 4]>> for __m256[src]

impl FromBits<Simd<[f64; 4]>> for __m256i[src]

impl FromBits<Simd<[f64; 4]>> for __m256d[src]

impl FromBits<Simd<[f64; 4]>> for i16x16[src]

impl FromBits<Simd<[f64; 4]>> for u16x16[src]

impl FromBits<Simd<[f64; 4]>> for i32x8[src]

impl FromBits<Simd<[f64; 4]>> for u32x8[src]

impl FromBits<Simd<[f64; 4]>> for f32x8[src]

impl FromBits<Simd<[f64; 4]>> for i64x4[src]

impl FromBits<Simd<[f64; 4]>> for u64x4[src]

impl FromBits<Simd<[f64; 4]>> for i128x2[src]

impl FromBits<Simd<[f64; 8]>> for i8x64[src]

impl FromBits<Simd<[f64; 8]>> for u8x64[src]

impl FromBits<Simd<[f64; 8]>> for u128x4[src]

impl FromBits<Simd<[f64; 8]>> for i16x32[src]

impl FromBits<Simd<[f64; 8]>> for u16x32[src]

impl FromBits<Simd<[f64; 8]>> for i32x16[src]

impl FromBits<Simd<[f64; 8]>> for u32x16[src]

impl FromBits<Simd<[f64; 8]>> for f32x16[src]

impl FromBits<Simd<[f64; 8]>> for i64x8[src]

impl FromBits<Simd<[f64; 8]>> for u64x8[src]

impl FromBits<Simd<[f64; 8]>> for i128x4[src]

impl FromBits<Simd<[i128; 1]>> for i8x16[src]

impl FromBits<Simd<[i128; 1]>> for u8x16[src]

impl FromBits<Simd<[i128; 1]>> for u128x1[src]

impl FromBits<Simd<[i128; 1]>> for __m128[src]

impl FromBits<Simd<[i128; 1]>> for __m128i[src]

impl FromBits<Simd<[i128; 1]>> for __m128d[src]

impl FromBits<Simd<[i128; 1]>> for i16x8[src]

impl FromBits<Simd<[i128; 1]>> for u16x8[src]

impl FromBits<Simd<[i128; 1]>> for i32x4[src]

impl FromBits<Simd<[i128; 1]>> for u32x4[src]

impl FromBits<Simd<[i128; 1]>> for f32x4[src]

impl FromBits<Simd<[i128; 1]>> for i64x2[src]

impl FromBits<Simd<[i128; 1]>> for u64x2[src]

impl FromBits<Simd<[i128; 1]>> for f64x2[src]

impl FromBits<Simd<[i128; 2]>> for i8x32[src]

impl FromBits<Simd<[i128; 2]>> for u8x32[src]

impl FromBits<Simd<[i128; 2]>> for u128x2[src]

impl FromBits<Simd<[i128; 2]>> for __m256[src]

impl FromBits<Simd<[i128; 2]>> for __m256i[src]

impl FromBits<Simd<[i128; 2]>> for __m256d[src]

impl FromBits<Simd<[i128; 2]>> for i16x16[src]

impl FromBits<Simd<[i128; 2]>> for u16x16[src]

impl FromBits<Simd<[i128; 2]>> for i32x8[src]

impl FromBits<Simd<[i128; 2]>> for u32x8[src]

impl FromBits<Simd<[i128; 2]>> for f32x8[src]

impl FromBits<Simd<[i128; 2]>> for i64x4[src]

impl FromBits<Simd<[i128; 2]>> for u64x4[src]

impl FromBits<Simd<[i128; 2]>> for f64x4[src]

impl FromBits<Simd<[i128; 4]>> for i8x64[src]

impl FromBits<Simd<[i128; 4]>> for u8x64[src]

impl FromBits<Simd<[i128; 4]>> for u128x4[src]

impl FromBits<Simd<[i128; 4]>> for i16x32[src]

impl FromBits<Simd<[i128; 4]>> for u16x32[src]

impl FromBits<Simd<[i128; 4]>> for i32x16[src]

impl FromBits<Simd<[i128; 4]>> for u32x16[src]

impl FromBits<Simd<[i128; 4]>> for f32x16[src]

impl FromBits<Simd<[i128; 4]>> for i64x8[src]

impl FromBits<Simd<[i128; 4]>> for u64x8[src]

impl FromBits<Simd<[i128; 4]>> for f64x8[src]

impl FromBits<Simd<[i16; 16]>> for i8x32[src]

impl FromBits<Simd<[i16; 16]>> for u8x32[src]

impl FromBits<Simd<[i16; 16]>> for u128x2[src]

impl FromBits<Simd<[i16; 16]>> for __m256[src]

impl FromBits<Simd<[i16; 16]>> for __m256i[src]

impl FromBits<Simd<[i16; 16]>> for __m256d[src]

impl FromBits<Simd<[i16; 16]>> for u16x16[src]

impl FromBits<Simd<[i16; 16]>> for i32x8[src]

impl FromBits<Simd<[i16; 16]>> for u32x8[src]

impl FromBits<Simd<[i16; 16]>> for f32x8[src]

impl FromBits<Simd<[i16; 16]>> for i64x4[src]

impl FromBits<Simd<[i16; 16]>> for u64x4[src]

impl FromBits<Simd<[i16; 16]>> for f64x4[src]

impl FromBits<Simd<[i16; 16]>> for i128x2[src]

impl FromBits<Simd<[i16; 2]>> for i8x4[src]

impl FromBits<Simd<[i16; 2]>> for u8x4[src]

impl FromBits<Simd<[i16; 2]>> for u16x2[src]

impl FromBits<Simd<[i16; 32]>> for i8x64[src]

impl FromBits<Simd<[i16; 32]>> for u8x64[src]

impl FromBits<Simd<[i16; 32]>> for u128x4[src]

impl FromBits<Simd<[i16; 32]>> for u16x32[src]

impl FromBits<Simd<[i16; 32]>> for i32x16[src]

impl FromBits<Simd<[i16; 32]>> for u32x16[src]

impl FromBits<Simd<[i16; 32]>> for f32x16[src]

impl FromBits<Simd<[i16; 32]>> for i64x8[src]

impl FromBits<Simd<[i16; 32]>> for u64x8[src]

impl FromBits<Simd<[i16; 32]>> for f64x8[src]

impl FromBits<Simd<[i16; 32]>> for i128x4[src]

impl FromBits<Simd<[i16; 4]>> for i8x8[src]

impl FromBits<Simd<[i16; 4]>> for u8x8[src]

impl FromBits<Simd<[i16; 4]>> for u16x4[src]

impl FromBits<Simd<[i16; 4]>> for i32x2[src]

impl FromBits<Simd<[i16; 4]>> for u32x2[src]

impl FromBits<Simd<[i16; 4]>> for f32x2[src]

impl FromBits<Simd<[i16; 4]>> for __m64[src]

impl FromBits<Simd<[i16; 8]>> for i8x16[src]

impl FromBits<Simd<[i16; 8]>> for u8x16[src]

impl FromBits<Simd<[i16; 8]>> for u128x1[src]

impl FromBits<Simd<[i16; 8]>> for __m128[src]

impl FromBits<Simd<[i16; 8]>> for __m128i[src]

impl FromBits<Simd<[i16; 8]>> for __m128d[src]

impl FromBits<Simd<[i16; 8]>> for u16x8[src]

impl FromBits<Simd<[i16; 8]>> for i32x4[src]

impl FromBits<Simd<[i16; 8]>> for u32x4[src]

impl FromBits<Simd<[i16; 8]>> for f32x4[src]

impl FromBits<Simd<[i16; 8]>> for i64x2[src]

impl FromBits<Simd<[i16; 8]>> for u64x2[src]

impl FromBits<Simd<[i16; 8]>> for f64x2[src]

impl FromBits<Simd<[i16; 8]>> for i128x1[src]

impl FromBits<Simd<[i32; 16]>> for i8x64[src]

impl FromBits<Simd<[i32; 16]>> for u8x64[src]

impl FromBits<Simd<[i32; 16]>> for u128x4[src]

impl FromBits<Simd<[i32; 16]>> for i16x32[src]

impl FromBits<Simd<[i32; 16]>> for u16x32[src]

impl FromBits<Simd<[i32; 16]>> for u32x16[src]

impl FromBits<Simd<[i32; 16]>> for f32x16[src]

impl FromBits<Simd<[i32; 16]>> for i64x8[src]

impl FromBits<Simd<[i32; 16]>> for u64x8[src]

impl FromBits<Simd<[i32; 16]>> for f64x8[src]

impl FromBits<Simd<[i32; 16]>> for i128x4[src]

impl FromBits<Simd<[i32; 2]>> for i8x8[src]

impl FromBits<Simd<[i32; 2]>> for u8x8[src]

impl FromBits<Simd<[i32; 2]>> for i16x4[src]

impl FromBits<Simd<[i32; 2]>> for u16x4[src]

impl FromBits<Simd<[i32; 2]>> for u32x2[src]

impl FromBits<Simd<[i32; 2]>> for f32x2[src]

impl FromBits<Simd<[i32; 2]>> for __m64[src]

impl FromBits<Simd<[i32; 4]>> for i8x16[src]

impl FromBits<Simd<[i32; 4]>> for u8x16[src]

impl FromBits<Simd<[i32; 4]>> for u128x1[src]

impl FromBits<Simd<[i32; 4]>> for __m128[src]

impl FromBits<Simd<[i32; 4]>> for __m128i[src]

impl FromBits<Simd<[i32; 4]>> for __m128d[src]

impl FromBits<Simd<[i32; 4]>> for i16x8[src]

impl FromBits<Simd<[i32; 4]>> for u16x8[src]

impl FromBits<Simd<[i32; 4]>> for u32x4[src]

impl FromBits<Simd<[i32; 4]>> for f32x4[src]

impl FromBits<Simd<[i32; 4]>> for i64x2[src]

impl FromBits<Simd<[i32; 4]>> for u64x2[src]

impl FromBits<Simd<[i32; 4]>> for f64x2[src]

impl FromBits<Simd<[i32; 4]>> for i128x1[src]

impl FromBits<Simd<[i32; 8]>> for i8x32[src]

impl FromBits<Simd<[i32; 8]>> for u8x32[src]

impl FromBits<Simd<[i32; 8]>> for u128x2[src]

impl FromBits<Simd<[i32; 8]>> for __m256[src]

impl FromBits<Simd<[i32; 8]>> for __m256i[src]

impl FromBits<Simd<[i32; 8]>> for __m256d[src]

impl FromBits<Simd<[i32; 8]>> for i16x16[src]

impl FromBits<Simd<[i32; 8]>> for u16x16[src]

impl FromBits<Simd<[i32; 8]>> for u32x8[src]

impl FromBits<Simd<[i32; 8]>> for f32x8[src]

impl FromBits<Simd<[i32; 8]>> for i64x4[src]

impl FromBits<Simd<[i32; 8]>> for u64x4[src]

impl FromBits<Simd<[i32; 8]>> for f64x4[src]

impl FromBits<Simd<[i32; 8]>> for i128x2[src]

impl FromBits<Simd<[i64; 2]>> for i8x16[src]

impl FromBits<Simd<[i64; 2]>> for u8x16[src]

impl FromBits<Simd<[i64; 2]>> for u128x1[src]

impl FromBits<Simd<[i64; 2]>> for __m128[src]

impl FromBits<Simd<[i64; 2]>> for __m128i[src]

impl FromBits<Simd<[i64; 2]>> for __m128d[src]

impl FromBits<Simd<[i64; 2]>> for i16x8[src]

impl FromBits<Simd<[i64; 2]>> for u16x8[src]

impl FromBits<Simd<[i64; 2]>> for i32x4[src]

impl FromBits<Simd<[i64; 2]>> for u32x4[src]

impl FromBits<Simd<[i64; 2]>> for f32x4[src]

impl FromBits<Simd<[i64; 2]>> for u64x2[src]

impl FromBits<Simd<[i64; 2]>> for f64x2[src]

impl FromBits<Simd<[i64; 2]>> for i128x1[src]

impl FromBits<Simd<[i64; 4]>> for i8x32[src]

impl FromBits<Simd<[i64; 4]>> for u8x32[src]

impl FromBits<Simd<[i64; 4]>> for u128x2[src]

impl FromBits<Simd<[i64; 4]>> for __m256[src]

impl FromBits<Simd<[i64; 4]>> for __m256i[src]

impl FromBits<Simd<[i64; 4]>> for __m256d[src]

impl FromBits<Simd<[i64; 4]>> for i16x16[src]

impl FromBits<Simd<[i64; 4]>> for u16x16[src]

impl FromBits<Simd<[i64; 4]>> for i32x8[src]

impl FromBits<Simd<[i64; 4]>> for u32x8[src]

impl FromBits<Simd<[i64; 4]>> for f32x8[src]

impl FromBits<Simd<[i64; 4]>> for u64x4[src]

impl FromBits<Simd<[i64; 4]>> for f64x4[src]

impl FromBits<Simd<[i64; 4]>> for i128x2[src]

impl FromBits<Simd<[i64; 8]>> for i8x64[src]

impl FromBits<Simd<[i64; 8]>> for u8x64[src]

impl FromBits<Simd<[i64; 8]>> for u128x4[src]

impl FromBits<Simd<[i64; 8]>> for i16x32[src]

impl FromBits<Simd<[i64; 8]>> for u16x32[src]

impl FromBits<Simd<[i64; 8]>> for i32x16[src]

impl FromBits<Simd<[i64; 8]>> for u32x16[src]

impl FromBits<Simd<[i64; 8]>> for f32x16[src]

impl FromBits<Simd<[i64; 8]>> for u64x8[src]

impl FromBits<Simd<[i64; 8]>> for f64x8[src]

impl FromBits<Simd<[i64; 8]>> for i128x4[src]

impl FromBits<Simd<[i8; 16]>> for u8x16[src]

impl FromBits<Simd<[i8; 16]>> for i16x8[src]

impl FromBits<Simd<[i8; 16]>> for u128x1[src]

impl FromBits<Simd<[i8; 16]>> for __m128[src]

impl FromBits<Simd<[i8; 16]>> for __m128i[src]

impl FromBits<Simd<[i8; 16]>> for __m128d[src]

impl FromBits<Simd<[i8; 16]>> for u16x8[src]

impl FromBits<Simd<[i8; 16]>> for i32x4[src]

impl FromBits<Simd<[i8; 16]>> for u32x4[src]

impl FromBits<Simd<[i8; 16]>> for f32x4[src]

impl FromBits<Simd<[i8; 16]>> for i64x2[src]

impl FromBits<Simd<[i8; 16]>> for u64x2[src]

impl FromBits<Simd<[i8; 16]>> for f64x2[src]

impl FromBits<Simd<[i8; 16]>> for i128x1[src]

impl FromBits<Simd<[i8; 2]>> for u8x2[src]

impl FromBits<Simd<[i8; 32]>> for u8x32[src]

impl FromBits<Simd<[i8; 32]>> for i16x16[src]

impl FromBits<Simd<[i8; 32]>> for u128x2[src]

impl FromBits<Simd<[i8; 32]>> for __m256[src]

impl FromBits<Simd<[i8; 32]>> for __m256i[src]

impl FromBits<Simd<[i8; 32]>> for __m256d[src]

impl FromBits<Simd<[i8; 32]>> for u16x16[src]

impl FromBits<Simd<[i8; 32]>> for i32x8[src]

impl FromBits<Simd<[i8; 32]>> for u32x8[src]

impl FromBits<Simd<[i8; 32]>> for f32x8[src]

impl FromBits<Simd<[i8; 32]>> for i64x4[src]

impl FromBits<Simd<[i8; 32]>> for u64x4[src]

impl FromBits<Simd<[i8; 32]>> for f64x4[src]

impl FromBits<Simd<[i8; 32]>> for i128x2[src]

impl FromBits<Simd<[i8; 4]>> for u8x4[src]

impl FromBits<Simd<[i8; 4]>> for i16x2[src]

impl FromBits<Simd<[i8; 4]>> for u16x2[src]

impl FromBits<Simd<[i8; 64]>> for u8x64[src]

impl FromBits<Simd<[i8; 64]>> for i16x32[src]

impl FromBits<Simd<[i8; 64]>> for u128x4[src]

impl FromBits<Simd<[i8; 64]>> for u16x32[src]

impl FromBits<Simd<[i8; 64]>> for i32x16[src]

impl FromBits<Simd<[i8; 64]>> for u32x16[src]

impl FromBits<Simd<[i8; 64]>> for f32x16[src]

impl FromBits<Simd<[i8; 64]>> for i64x8[src]

impl FromBits<Simd<[i8; 64]>> for u64x8[src]

impl FromBits<Simd<[i8; 64]>> for f64x8[src]

impl FromBits<Simd<[i8; 64]>> for i128x4[src]

impl FromBits<Simd<[i8; 8]>> for u8x8[src]

impl FromBits<Simd<[i8; 8]>> for i16x4[src]

impl FromBits<Simd<[i8; 8]>> for u16x4[src]

impl FromBits<Simd<[i8; 8]>> for i32x2[src]

impl FromBits<Simd<[i8; 8]>> for u32x2[src]

impl FromBits<Simd<[i8; 8]>> for f32x2[src]

impl FromBits<Simd<[i8; 8]>> for __m64[src]

impl FromBits<Simd<[m128; 1]>> for i8x16[src]

impl FromBits<Simd<[m128; 1]>> for u8x16[src]

impl FromBits<Simd<[m128; 1]>> for i64x2[src]

impl FromBits<Simd<[m128; 1]>> for u64x2[src]

impl FromBits<Simd<[m128; 1]>> for f64x2[src]

impl FromBits<Simd<[m128; 1]>> for m64x2[src]

impl FromBits<Simd<[m128; 1]>> for i128x1[src]

impl FromBits<Simd<[m128; 1]>> for u128x1[src]

impl FromBits<Simd<[m128; 1]>> for __m128[src]

impl FromBits<Simd<[m128; 1]>> for __m128i[src]

impl FromBits<Simd<[m128; 1]>> for __m128d[src]

impl FromBits<Simd<[m128; 1]>> for m8x16[src]

impl FromBits<Simd<[m128; 1]>> for i16x8[src]

impl FromBits<Simd<[m128; 1]>> for u16x8[src]

impl FromBits<Simd<[m128; 1]>> for m16x8[src]

impl FromBits<Simd<[m128; 1]>> for i32x4[src]

impl FromBits<Simd<[m128; 1]>> for u32x4[src]

impl FromBits<Simd<[m128; 1]>> for f32x4[src]

impl FromBits<Simd<[m128; 1]>> for m32x4[src]

impl FromBits<Simd<[m128; 2]>> for i8x32[src]

impl FromBits<Simd<[m128; 2]>> for u8x32[src]

impl FromBits<Simd<[m128; 2]>> for i64x4[src]

impl FromBits<Simd<[m128; 2]>> for u64x4[src]

impl FromBits<Simd<[m128; 2]>> for f64x4[src]

impl FromBits<Simd<[m128; 2]>> for m64x4[src]

impl FromBits<Simd<[m128; 2]>> for i128x2[src]

impl FromBits<Simd<[m128; 2]>> for u128x2[src]

impl FromBits<Simd<[m128; 2]>> for __m256[src]

impl FromBits<Simd<[m128; 2]>> for __m256i[src]

impl FromBits<Simd<[m128; 2]>> for __m256d[src]

impl FromBits<Simd<[m128; 2]>> for m8x32[src]

impl FromBits<Simd<[m128; 2]>> for i16x16[src]

impl FromBits<Simd<[m128; 2]>> for u16x16[src]

impl FromBits<Simd<[m128; 2]>> for m16x16[src]

impl FromBits<Simd<[m128; 2]>> for i32x8[src]

impl FromBits<Simd<[m128; 2]>> for u32x8[src]

impl FromBits<Simd<[m128; 2]>> for f32x8[src]

impl FromBits<Simd<[m128; 2]>> for m32x8[src]

impl FromBits<Simd<[m128; 4]>> for i8x64[src]

impl FromBits<Simd<[m128; 4]>> for u8x64[src]

impl FromBits<Simd<[m128; 4]>> for i64x8[src]

impl FromBits<Simd<[m128; 4]>> for u64x8[src]

impl FromBits<Simd<[m128; 4]>> for f64x8[src]

impl FromBits<Simd<[m128; 4]>> for m64x8[src]

impl FromBits<Simd<[m128; 4]>> for i128x4[src]

impl FromBits<Simd<[m128; 4]>> for u128x4[src]

impl FromBits<Simd<[m128; 4]>> for m8x64[src]

impl FromBits<Simd<[m128; 4]>> for i16x32[src]

impl FromBits<Simd<[m128; 4]>> for u16x32[src]

impl FromBits<Simd<[m128; 4]>> for m16x32[src]

impl FromBits<Simd<[m128; 4]>> for i32x16[src]

impl FromBits<Simd<[m128; 4]>> for u32x16[src]

impl FromBits<Simd<[m128; 4]>> for f32x16[src]

impl FromBits<Simd<[m128; 4]>> for m32x16[src]

impl FromBits<Simd<[m16; 16]>> for i8x32[src]

impl FromBits<Simd<[m16; 16]>> for u8x32[src]

impl FromBits<Simd<[m16; 16]>> for f64x4[src]

impl FromBits<Simd<[m16; 16]>> for i128x2[src]

impl FromBits<Simd<[m16; 16]>> for u128x2[src]

impl FromBits<Simd<[m16; 16]>> for __m256[src]

impl FromBits<Simd<[m16; 16]>> for __m256i[src]

impl FromBits<Simd<[m16; 16]>> for __m256d[src]

impl FromBits<Simd<[m16; 16]>> for m8x32[src]

impl FromBits<Simd<[m16; 16]>> for i16x16[src]

impl FromBits<Simd<[m16; 16]>> for u16x16[src]

impl FromBits<Simd<[m16; 16]>> for i32x8[src]

impl FromBits<Simd<[m16; 16]>> for u32x8[src]

impl FromBits<Simd<[m16; 16]>> for f32x8[src]

impl FromBits<Simd<[m16; 16]>> for i64x4[src]

impl FromBits<Simd<[m16; 16]>> for u64x4[src]

impl FromBits<Simd<[m16; 2]>> for i8x4[src]

impl FromBits<Simd<[m16; 2]>> for u8x4[src]

impl FromBits<Simd<[m16; 2]>> for m8x4[src]

impl FromBits<Simd<[m16; 2]>> for i16x2[src]

impl FromBits<Simd<[m16; 2]>> for u16x2[src]

impl FromBits<Simd<[m16; 32]>> for i8x64[src]

impl FromBits<Simd<[m16; 32]>> for u8x64[src]

impl FromBits<Simd<[m16; 32]>> for f64x8[src]

impl FromBits<Simd<[m16; 32]>> for i128x4[src]

impl FromBits<Simd<[m16; 32]>> for u128x4[src]

impl FromBits<Simd<[m16; 32]>> for m8x64[src]

impl FromBits<Simd<[m16; 32]>> for i16x32[src]

impl FromBits<Simd<[m16; 32]>> for u16x32[src]

impl FromBits<Simd<[m16; 32]>> for i32x16[src]

impl FromBits<Simd<[m16; 32]>> for u32x16[src]

impl FromBits<Simd<[m16; 32]>> for f32x16[src]

impl FromBits<Simd<[m16; 32]>> for i64x8[src]

impl FromBits<Simd<[m16; 32]>> for u64x8[src]

impl FromBits<Simd<[m16; 4]>> for i8x8[src]

impl FromBits<Simd<[m16; 4]>> for u8x8[src]

impl FromBits<Simd<[m16; 4]>> for m8x8[src]

impl FromBits<Simd<[m16; 4]>> for i16x4[src]

impl FromBits<Simd<[m16; 4]>> for u16x4[src]

impl FromBits<Simd<[m16; 4]>> for i32x2[src]

impl FromBits<Simd<[m16; 4]>> for u32x2[src]

impl FromBits<Simd<[m16; 4]>> for f32x2[src]

impl FromBits<Simd<[m16; 4]>> for __m64[src]

impl FromBits<Simd<[m16; 8]>> for i8x16[src]

impl FromBits<Simd<[m16; 8]>> for u8x16[src]

impl FromBits<Simd<[m16; 8]>> for f64x2[src]

impl FromBits<Simd<[m16; 8]>> for i128x1[src]

impl FromBits<Simd<[m16; 8]>> for u128x1[src]

impl FromBits<Simd<[m16; 8]>> for __m128[src]

impl FromBits<Simd<[m16; 8]>> for __m128i[src]

impl FromBits<Simd<[m16; 8]>> for __m128d[src]

impl FromBits<Simd<[m16; 8]>> for m8x16[src]

impl FromBits<Simd<[m16; 8]>> for i16x8[src]

impl FromBits<Simd<[m16; 8]>> for u16x8[src]

impl FromBits<Simd<[m16; 8]>> for i32x4[src]

impl FromBits<Simd<[m16; 8]>> for u32x4[src]

impl FromBits<Simd<[m16; 8]>> for f32x4[src]

impl FromBits<Simd<[m16; 8]>> for i64x2[src]

impl FromBits<Simd<[m16; 8]>> for u64x2[src]

impl FromBits<Simd<[m32; 16]>> for i8x64[src]

impl FromBits<Simd<[m32; 16]>> for u8x64[src]

impl FromBits<Simd<[m32; 16]>> for u64x8[src]

impl FromBits<Simd<[m32; 16]>> for f64x8[src]

impl FromBits<Simd<[m32; 16]>> for i128x4[src]

impl FromBits<Simd<[m32; 16]>> for u128x4[src]

impl FromBits<Simd<[m32; 16]>> for m8x64[src]

impl FromBits<Simd<[m32; 16]>> for i16x32[src]

impl FromBits<Simd<[m32; 16]>> for u16x32[src]

impl FromBits<Simd<[m32; 16]>> for m16x32[src]

impl FromBits<Simd<[m32; 16]>> for i32x16[src]

impl FromBits<Simd<[m32; 16]>> for u32x16[src]

impl FromBits<Simd<[m32; 16]>> for f32x16[src]

impl FromBits<Simd<[m32; 16]>> for i64x8[src]

impl FromBits<Simd<[m32; 2]>> for i8x8[src]

impl FromBits<Simd<[m32; 2]>> for u8x8[src]

impl FromBits<Simd<[m32; 2]>> for m8x8[src]

impl FromBits<Simd<[m32; 2]>> for i16x4[src]

impl FromBits<Simd<[m32; 2]>> for u16x4[src]

impl FromBits<Simd<[m32; 2]>> for m16x4[src]

impl FromBits<Simd<[m32; 2]>> for i32x2[src]

impl FromBits<Simd<[m32; 2]>> for u32x2[src]

impl FromBits<Simd<[m32; 2]>> for f32x2[src]

impl FromBits<Simd<[m32; 2]>> for __m64[src]

impl FromBits<Simd<[m32; 4]>> for i8x16[src]

impl FromBits<Simd<[m32; 4]>> for u8x16[src]

impl FromBits<Simd<[m32; 4]>> for u64x2[src]

impl FromBits<Simd<[m32; 4]>> for f64x2[src]

impl FromBits<Simd<[m32; 4]>> for i128x1[src]

impl FromBits<Simd<[m32; 4]>> for u128x1[src]

impl FromBits<Simd<[m32; 4]>> for __m128[src]

impl FromBits<Simd<[m32; 4]>> for __m128i[src]

impl FromBits<Simd<[m32; 4]>> for __m128d[src]

impl FromBits<Simd<[m32; 4]>> for m8x16[src]

impl FromBits<Simd<[m32; 4]>> for i16x8[src]

impl FromBits<Simd<[m32; 4]>> for u16x8[src]

impl FromBits<Simd<[m32; 4]>> for m16x8[src]

impl FromBits<Simd<[m32; 4]>> for i32x4[src]

impl FromBits<Simd<[m32; 4]>> for u32x4[src]

impl FromBits<Simd<[m32; 4]>> for f32x4[src]

impl FromBits<Simd<[m32; 4]>> for i64x2[src]

impl FromBits<Simd<[m32; 8]>> for i8x32[src]

impl FromBits<Simd<[m32; 8]>> for u8x32[src]

impl FromBits<Simd<[m32; 8]>> for u64x4[src]

impl FromBits<Simd<[m32; 8]>> for f64x4[src]

impl FromBits<Simd<[m32; 8]>> for i128x2[src]

impl FromBits<Simd<[m32; 8]>> for u128x2[src]

impl FromBits<Simd<[m32; 8]>> for __m256[src]

impl FromBits<Simd<[m32; 8]>> for __m256i[src]

impl FromBits<Simd<[m32; 8]>> for __m256d[src]

impl FromBits<Simd<[m32; 8]>> for m8x32[src]

impl FromBits<Simd<[m32; 8]>> for i16x16[src]

impl FromBits<Simd<[m32; 8]>> for u16x16[src]

impl FromBits<Simd<[m32; 8]>> for m16x16[src]

impl FromBits<Simd<[m32; 8]>> for i32x8[src]

impl FromBits<Simd<[m32; 8]>> for u32x8[src]

impl FromBits<Simd<[m32; 8]>> for f32x8[src]

impl FromBits<Simd<[m32; 8]>> for i64x4[src]

impl FromBits<Simd<[m64; 2]>> for i8x16[src]

impl FromBits<Simd<[m64; 2]>> for u8x16[src]

impl FromBits<Simd<[m64; 2]>> for i64x2[src]

impl FromBits<Simd<[m64; 2]>> for u64x2[src]

impl FromBits<Simd<[m64; 2]>> for f64x2[src]

impl FromBits<Simd<[m64; 2]>> for i128x1[src]

impl FromBits<Simd<[m64; 2]>> for u128x1[src]

impl FromBits<Simd<[m64; 2]>> for __m128[src]

impl FromBits<Simd<[m64; 2]>> for __m128i[src]

impl FromBits<Simd<[m64; 2]>> for __m128d[src]

impl FromBits<Simd<[m64; 2]>> for m8x16[src]

impl FromBits<Simd<[m64; 2]>> for i16x8[src]

impl FromBits<Simd<[m64; 2]>> for u16x8[src]

impl FromBits<Simd<[m64; 2]>> for m16x8[src]

impl FromBits<Simd<[m64; 2]>> for i32x4[src]

impl FromBits<Simd<[m64; 2]>> for u32x4[src]

impl FromBits<Simd<[m64; 2]>> for f32x4[src]

impl FromBits<Simd<[m64; 2]>> for m32x4[src]

impl FromBits<Simd<[m64; 4]>> for i8x32[src]

impl FromBits<Simd<[m64; 4]>> for u8x32[src]

impl FromBits<Simd<[m64; 4]>> for i64x4[src]

impl FromBits<Simd<[m64; 4]>> for u64x4[src]

impl FromBits<Simd<[m64; 4]>> for f64x4[src]

impl FromBits<Simd<[m64; 4]>> for i128x2[src]

impl FromBits<Simd<[m64; 4]>> for u128x2[src]

impl FromBits<Simd<[m64; 4]>> for __m256[src]

impl FromBits<Simd<[m64; 4]>> for __m256i[src]

impl FromBits<Simd<[m64; 4]>> for __m256d[src]

impl FromBits<Simd<[m64; 4]>> for m8x32[src]

impl FromBits<Simd<[m64; 4]>> for i16x16[src]

impl FromBits<Simd<[m64; 4]>> for u16x16[src]

impl FromBits<Simd<[m64; 4]>> for m16x16[src]

impl FromBits<Simd<[m64; 4]>> for i32x8[src]

impl FromBits<Simd<[m64; 4]>> for u32x8[src]

impl FromBits<Simd<[m64; 4]>> for f32x8[src]

impl FromBits<Simd<[m64; 4]>> for m32x8[src]

impl FromBits<Simd<[m64; 8]>> for i8x64[src]

impl FromBits<Simd<[m64; 8]>> for u8x64[src]

impl FromBits<Simd<[m64; 8]>> for i64x8[src]

impl FromBits<Simd<[m64; 8]>> for u64x8[src]

impl FromBits<Simd<[m64; 8]>> for f64x8[src]

impl FromBits<Simd<[m64; 8]>> for i128x4[src]

impl FromBits<Simd<[m64; 8]>> for u128x4[src]

impl FromBits<Simd<[m64; 8]>> for m8x64[src]

impl FromBits<Simd<[m64; 8]>> for i16x32[src]

impl FromBits<Simd<[m64; 8]>> for u16x32[src]

impl FromBits<Simd<[m64; 8]>> for m16x32[src]

impl FromBits<Simd<[m64; 8]>> for i32x16[src]

impl FromBits<Simd<[m64; 8]>> for u32x16[src]

impl FromBits<Simd<[m64; 8]>> for f32x16[src]

impl FromBits<Simd<[m64; 8]>> for m32x16[src]

impl FromBits<Simd<[m8; 16]>> for i8x16[src]

impl FromBits<Simd<[m8; 16]>> for u8x16[src]

impl FromBits<Simd<[m8; 16]>> for i128x1[src]

impl FromBits<Simd<[m8; 16]>> for u128x1[src]

impl FromBits<Simd<[m8; 16]>> for __m128[src]

impl FromBits<Simd<[m8; 16]>> for __m128i[src]

impl FromBits<Simd<[m8; 16]>> for __m128d[src]

impl FromBits<Simd<[m8; 16]>> for i16x8[src]

impl FromBits<Simd<[m8; 16]>> for u16x8[src]

impl FromBits<Simd<[m8; 16]>> for i32x4[src]

impl FromBits<Simd<[m8; 16]>> for u32x4[src]

impl FromBits<Simd<[m8; 16]>> for f32x4[src]

impl FromBits<Simd<[m8; 16]>> for i64x2[src]

impl FromBits<Simd<[m8; 16]>> for u64x2[src]

impl FromBits<Simd<[m8; 16]>> for f64x2[src]

impl FromBits<Simd<[m8; 2]>> for i8x2[src]

impl FromBits<Simd<[m8; 2]>> for u8x2[src]

impl FromBits<Simd<[m8; 32]>> for i8x32[src]

impl FromBits<Simd<[m8; 32]>> for u8x32[src]

impl FromBits<Simd<[m8; 32]>> for i128x2[src]

impl FromBits<Simd<[m8; 32]>> for u128x2[src]

impl FromBits<Simd<[m8; 32]>> for __m256[src]

impl FromBits<Simd<[m8; 32]>> for __m256i[src]

impl FromBits<Simd<[m8; 32]>> for __m256d[src]

impl FromBits<Simd<[m8; 32]>> for i16x16[src]

impl FromBits<Simd<[m8; 32]>> for u16x16[src]

impl FromBits<Simd<[m8; 32]>> for i32x8[src]

impl FromBits<Simd<[m8; 32]>> for u32x8[src]

impl FromBits<Simd<[m8; 32]>> for f32x8[src]

impl FromBits<Simd<[m8; 32]>> for i64x4[src]

impl FromBits<Simd<[m8; 32]>> for u64x4[src]

impl FromBits<Simd<[m8; 32]>> for f64x4[src]

impl FromBits<Simd<[m8; 4]>> for i8x4[src]

impl FromBits<Simd<[m8; 4]>> for u8x4[src]

impl FromBits<Simd<[m8; 4]>> for i16x2[src]

impl FromBits<Simd<[m8; 4]>> for u16x2[src]

impl FromBits<Simd<[m8; 64]>> for i8x64[src]

impl FromBits<Simd<[m8; 64]>> for u8x64[src]

impl FromBits<Simd<[m8; 64]>> for i128x4[src]

impl FromBits<Simd<[m8; 64]>> for u128x4[src]

impl FromBits<Simd<[m8; 64]>> for i16x32[src]

impl FromBits<Simd<[m8; 64]>> for u16x32[src]

impl FromBits<Simd<[m8; 64]>> for i32x16[src]

impl FromBits<Simd<[m8; 64]>> for u32x16[src]

impl FromBits<Simd<[m8; 64]>> for f32x16[src]

impl FromBits<Simd<[m8; 64]>> for i64x8[src]

impl FromBits<Simd<[m8; 64]>> for u64x8[src]

impl FromBits<Simd<[m8; 64]>> for f64x8[src]

impl FromBits<Simd<[m8; 8]>> for i8x8[src]

impl FromBits<Simd<[m8; 8]>> for u8x8[src]

impl FromBits<Simd<[m8; 8]>> for i16x4[src]

impl FromBits<Simd<[m8; 8]>> for u16x4[src]

impl FromBits<Simd<[m8; 8]>> for i32x2[src]

impl FromBits<Simd<[m8; 8]>> for u32x2[src]

impl FromBits<Simd<[m8; 8]>> for f32x2[src]

impl FromBits<Simd<[m8; 8]>> for __m64[src]

impl FromBits<Simd<[u128; 1]>> for i8x16[src]

impl FromBits<Simd<[u128; 1]>> for u8x16[src]

impl FromBits<Simd<[u128; 1]>> for i128x1[src]

impl FromBits<Simd<[u128; 1]>> for __m128[src]

impl FromBits<Simd<[u128; 1]>> for __m128i[src]

impl FromBits<Simd<[u128; 1]>> for __m128d[src]

impl FromBits<Simd<[u128; 1]>> for i16x8[src]

impl FromBits<Simd<[u128; 1]>> for u16x8[src]

impl FromBits<Simd<[u128; 1]>> for i32x4[src]

impl FromBits<Simd<[u128; 1]>> for u32x4[src]

impl FromBits<Simd<[u128; 1]>> for f32x4[src]

impl FromBits<Simd<[u128; 1]>> for i64x2[src]

impl FromBits<Simd<[u128; 1]>> for u64x2[src]

impl FromBits<Simd<[u128; 1]>> for f64x2[src]

impl FromBits<Simd<[u128; 2]>> for i8x32[src]

impl FromBits<Simd<[u128; 2]>> for u8x32[src]

impl FromBits<Simd<[u128; 2]>> for i128x2[src]

impl FromBits<Simd<[u128; 2]>> for __m256[src]

impl FromBits<Simd<[u128; 2]>> for __m256i[src]

impl FromBits<Simd<[u128; 2]>> for __m256d[src]

impl FromBits<Simd<[u128; 2]>> for i16x16[src]

impl FromBits<Simd<[u128; 2]>> for u16x16[src]

impl FromBits<Simd<[u128; 2]>> for i32x8[src]

impl FromBits<Simd<[u128; 2]>> for u32x8[src]

impl FromBits<Simd<[u128; 2]>> for f32x8[src]

impl FromBits<Simd<[u128; 2]>> for i64x4[src]

impl FromBits<Simd<[u128; 2]>> for u64x4[src]

impl FromBits<Simd<[u128; 2]>> for f64x4[src]

impl FromBits<Simd<[u128; 4]>> for i8x64[src]

impl FromBits<Simd<[u128; 4]>> for u8x64[src]

impl FromBits<Simd<[u128; 4]>> for i128x4[src]

impl FromBits<Simd<[u128; 4]>> for i16x32[src]

impl FromBits<Simd<[u128; 4]>> for u16x32[src]

impl FromBits<Simd<[u128; 4]>> for i32x16[src]

impl FromBits<Simd<[u128; 4]>> for u32x16[src]

impl FromBits<Simd<[u128; 4]>> for f32x16[src]

impl FromBits<Simd<[u128; 4]>> for i64x8[src]

impl FromBits<Simd<[u128; 4]>> for u64x8[src]

impl FromBits<Simd<[u128; 4]>> for f64x8[src]

impl FromBits<Simd<[u16; 16]>> for i8x32[src]

impl FromBits<Simd<[u16; 16]>> for u8x32[src]

impl FromBits<Simd<[u16; 16]>> for u128x2[src]

impl FromBits<Simd<[u16; 16]>> for __m256[src]

impl FromBits<Simd<[u16; 16]>> for __m256i[src]

impl FromBits<Simd<[u16; 16]>> for __m256d[src]

impl FromBits<Simd<[u16; 16]>> for i16x16[src]

impl FromBits<Simd<[u16; 16]>> for i32x8[src]

impl FromBits<Simd<[u16; 16]>> for u32x8[src]

impl FromBits<Simd<[u16; 16]>> for f32x8[src]

impl FromBits<Simd<[u16; 16]>> for i64x4[src]

impl FromBits<Simd<[u16; 16]>> for u64x4[src]

impl FromBits<Simd<[u16; 16]>> for f64x4[src]

impl FromBits<Simd<[u16; 16]>> for i128x2[src]

impl FromBits<Simd<[u16; 2]>> for i8x4[src]

impl FromBits<Simd<[u16; 2]>> for u8x4[src]

impl FromBits<Simd<[u16; 2]>> for i16x2[src]

impl FromBits<Simd<[u16; 32]>> for i8x64[src]

impl FromBits<Simd<[u16; 32]>> for u8x64[src]

impl FromBits<Simd<[u16; 32]>> for u128x4[src]

impl FromBits<Simd<[u16; 32]>> for i16x32[src]

impl FromBits<Simd<[u16; 32]>> for i32x16[src]

impl FromBits<Simd<[u16; 32]>> for u32x16[src]

impl FromBits<Simd<[u16; 32]>> for f32x16[src]

impl FromBits<Simd<[u16; 32]>> for i64x8[src]

impl FromBits<Simd<[u16; 32]>> for u64x8[src]

impl FromBits<Simd<[u16; 32]>> for f64x8[src]

impl FromBits<Simd<[u16; 32]>> for i128x4[src]

impl FromBits<Simd<[u16; 4]>> for i8x8[src]

impl FromBits<Simd<[u16; 4]>> for u8x8[src]

impl FromBits<Simd<[u16; 4]>> for i16x4[src]

impl FromBits<Simd<[u16; 4]>> for i32x2[src]

impl FromBits<Simd<[u16; 4]>> for u32x2[src]

impl FromBits<Simd<[u16; 4]>> for f32x2[src]

impl FromBits<Simd<[u16; 4]>> for __m64[src]

impl FromBits<Simd<[u16; 8]>> for i8x16[src]

impl FromBits<Simd<[u16; 8]>> for u8x16[src]

impl FromBits<Simd<[u16; 8]>> for u128x1[src]

impl FromBits<Simd<[u16; 8]>> for __m128[src]

impl FromBits<Simd<[u16; 8]>> for __m128i[src]

impl FromBits<Simd<[u16; 8]>> for __m128d[src]

impl FromBits<Simd<[u16; 8]>> for i16x8[src]

impl FromBits<Simd<[u16; 8]>> for i32x4[src]

impl FromBits<Simd<[u16; 8]>> for u32x4[src]

impl FromBits<Simd<[u16; 8]>> for f32x4[src]

impl FromBits<Simd<[u16; 8]>> for i64x2[src]

impl FromBits<Simd<[u16; 8]>> for u64x2[src]

impl FromBits<Simd<[u16; 8]>> for f64x2[src]

impl FromBits<Simd<[u16; 8]>> for i128x1[src]

impl FromBits<Simd<[u32; 16]>> for i8x64[src]

impl FromBits<Simd<[u32; 16]>> for u8x64[src]

impl FromBits<Simd<[u32; 16]>> for u128x4[src]

impl FromBits<Simd<[u32; 16]>> for i16x32[src]

impl FromBits<Simd<[u32; 16]>> for u16x32[src]

impl FromBits<Simd<[u32; 16]>> for i32x16[src]

impl FromBits<Simd<[u32; 16]>> for f32x16[src]

impl FromBits<Simd<[u32; 16]>> for i64x8[src]

impl FromBits<Simd<[u32; 16]>> for u64x8[src]

impl FromBits<Simd<[u32; 16]>> for f64x8[src]

impl FromBits<Simd<[u32; 16]>> for i128x4[src]

impl FromBits<Simd<[u32; 2]>> for i8x8[src]

impl FromBits<Simd<[u32; 2]>> for u8x8[src]

impl FromBits<Simd<[u32; 2]>> for i16x4[src]

impl FromBits<Simd<[u32; 2]>> for u16x4[src]

impl FromBits<Simd<[u32; 2]>> for i32x2[src]

impl FromBits<Simd<[u32; 2]>> for f32x2[src]

impl FromBits<Simd<[u32; 2]>> for __m64[src]

impl FromBits<Simd<[u32; 4]>> for i8x16[src]

impl FromBits<Simd<[u32; 4]>> for u8x16[src]

impl FromBits<Simd<[u32; 4]>> for u128x1[src]

impl FromBits<Simd<[u32; 4]>> for __m128[src]

impl FromBits<Simd<[u32; 4]>> for __m128i[src]

impl FromBits<Simd<[u32; 4]>> for __m128d[src]

impl FromBits<Simd<[u32; 4]>> for i16x8[src]

impl FromBits<Simd<[u32; 4]>> for u16x8[src]

impl FromBits<Simd<[u32; 4]>> for i32x4[src]

impl FromBits<Simd<[u32; 4]>> for f32x4[src]

impl FromBits<Simd<[u32; 4]>> for i64x2[src]

impl FromBits<Simd<[u32; 4]>> for u64x2[src]

impl FromBits<Simd<[u32; 4]>> for f64x2[src]

impl FromBits<Simd<[u32; 4]>> for i128x1[src]

impl FromBits<Simd<[u32; 8]>> for i8x32[src]

impl FromBits<Simd<[u32; 8]>> for u8x32[src]

impl FromBits<Simd<[u32; 8]>> for u128x2[src]

impl FromBits<Simd<[u32; 8]>> for __m256[src]

impl FromBits<Simd<[u32; 8]>> for __m256i[src]

impl FromBits<Simd<[u32; 8]>> for __m256d[src]

impl FromBits<Simd<[u32; 8]>> for i16x16[src]

impl FromBits<Simd<[u32; 8]>> for u16x16[src]

impl FromBits<Simd<[u32; 8]>> for i32x8[src]

impl FromBits<Simd<[u32; 8]>> for f32x8[src]

impl FromBits<Simd<[u32; 8]>> for i64x4[src]

impl FromBits<Simd<[u32; 8]>> for u64x4[src]

impl FromBits<Simd<[u32; 8]>> for f64x4[src]

impl FromBits<Simd<[u32; 8]>> for i128x2[src]

impl FromBits<Simd<[u64; 2]>> for i8x16[src]

impl FromBits<Simd<[u64; 2]>> for u8x16[src]

impl FromBits<Simd<[u64; 2]>> for u128x1[src]

impl FromBits<Simd<[u64; 2]>> for __m128[src]

impl FromBits<Simd<[u64; 2]>> for __m128i[src]

impl FromBits<Simd<[u64; 2]>> for __m128d[src]

impl FromBits<Simd<[u64; 2]>> for i16x8[src]

impl FromBits<Simd<[u64; 2]>> for u16x8[src]

impl FromBits<Simd<[u64; 2]>> for i32x4[src]

impl FromBits<Simd<[u64; 2]>> for u32x4[src]

impl FromBits<Simd<[u64; 2]>> for f32x4[src]

impl FromBits<Simd<[u64; 2]>> for i64x2[src]

impl FromBits<Simd<[u64; 2]>> for f64x2[src]

impl FromBits<Simd<[u64; 2]>> for i128x1[src]

impl FromBits<Simd<[u64; 4]>> for i8x32[src]

impl FromBits<Simd<[u64; 4]>> for u8x32[src]

impl FromBits<Simd<[u64; 4]>> for u128x2[src]

impl FromBits<Simd<[u64; 4]>> for __m256[src]

impl FromBits<Simd<[u64; 4]>> for __m256i[src]

impl FromBits<Simd<[u64; 4]>> for __m256d[src]

impl FromBits<Simd<[u64; 4]>> for i16x16[src]

impl FromBits<Simd<[u64; 4]>> for u16x16[src]

impl FromBits<Simd<[u64; 4]>> for i32x8[src]

impl FromBits<Simd<[u64; 4]>> for u32x8[src]

impl FromBits<Simd<[u64; 4]>> for f32x8[src]

impl FromBits<Simd<[u64; 4]>> for i64x4[src]

impl FromBits<Simd<[u64; 4]>> for f64x4[src]

impl FromBits<Simd<[u64; 4]>> for i128x2[src]

impl FromBits<Simd<[u64; 8]>> for i8x64[src]

impl FromBits<Simd<[u64; 8]>> for u8x64[src]

impl FromBits<Simd<[u64; 8]>> for u128x4[src]

impl FromBits<Simd<[u64; 8]>> for i16x32[src]

impl FromBits<Simd<[u64; 8]>> for u16x32[src]

impl FromBits<Simd<[u64; 8]>> for i32x16[src]

impl FromBits<Simd<[u64; 8]>> for u32x16[src]

impl FromBits<Simd<[u64; 8]>> for f32x16[src]

impl FromBits<Simd<[u64; 8]>> for i64x8[src]

impl FromBits<Simd<[u64; 8]>> for f64x8[src]

impl FromBits<Simd<[u64; 8]>> for i128x4[src]

impl FromBits<Simd<[u8; 16]>> for i8x16[src]

impl FromBits<Simd<[u8; 16]>> for i16x8[src]

impl FromBits<Simd<[u8; 16]>> for u128x1[src]

impl FromBits<Simd<[u8; 16]>> for __m128[src]

impl FromBits<Simd<[u8; 16]>> for __m128i[src]

impl FromBits<Simd<[u8; 16]>> for __m128d[src]

impl FromBits<Simd<[u8; 16]>> for u16x8[src]

impl FromBits<Simd<[u8; 16]>> for i32x4[src]

impl FromBits<Simd<[u8; 16]>> for u32x4[src]

impl FromBits<Simd<[u8; 16]>> for f32x4[src]

impl FromBits<Simd<[u8; 16]>> for i64x2[src]

impl FromBits<Simd<[u8; 16]>> for u64x2[src]

impl FromBits<Simd<[u8; 16]>> for f64x2[src]

impl FromBits<Simd<[u8; 16]>> for i128x1[src]

impl FromBits<Simd<[u8; 2]>> for i8x2[src]

impl FromBits<Simd<[u8; 32]>> for i8x32[src]

impl FromBits<Simd<[u8; 32]>> for i16x16[src]

impl FromBits<Simd<[u8; 32]>> for u128x2[src]

impl FromBits<Simd<[u8; 32]>> for __m256[src]

impl FromBits<Simd<[u8; 32]>> for __m256i[src]

impl FromBits<Simd<[u8; 32]>> for __m256d[src]

impl FromBits<Simd<[u8; 32]>> for u16x16[src]

impl FromBits<Simd<[u8; 32]>> for i32x8[src]

impl FromBits<Simd<[u8; 32]>> for u32x8[src]

impl FromBits<Simd<[u8; 32]>> for f32x8[src]

impl FromBits<Simd<[u8; 32]>> for i64x4[src]

impl FromBits<Simd<[u8; 32]>> for u64x4[src]

impl FromBits<Simd<[u8; 32]>> for f64x4[src]

impl FromBits<Simd<[u8; 32]>> for i128x2[src]

impl FromBits<Simd<[u8; 4]>> for i8x4[src]

impl FromBits<Simd<[u8; 4]>> for i16x2[src]

impl FromBits<Simd<[u8; 4]>> for u16x2[src]

impl FromBits<Simd<[u8; 64]>> for i8x64[src]

impl FromBits<Simd<[u8; 64]>> for i16x32[src]

impl FromBits<Simd<[u8; 64]>> for u128x4[src]

impl FromBits<Simd<[u8; 64]>> for u16x32[src]

impl FromBits<Simd<[u8; 64]>> for i32x16[src]

impl FromBits<Simd<[u8; 64]>> for u32x16[src]

impl FromBits<Simd<[u8; 64]>> for f32x16[src]

impl FromBits<Simd<[u8; 64]>> for i64x8[src]

impl FromBits<Simd<[u8; 64]>> for u64x8[src]

impl FromBits<Simd<[u8; 64]>> for f64x8[src]

impl FromBits<Simd<[u8; 64]>> for i128x4[src]

impl FromBits<Simd<[u8; 8]>> for i8x8[src]

impl FromBits<Simd<[u8; 8]>> for i16x4[src]

impl FromBits<Simd<[u8; 8]>> for u16x4[src]

impl FromBits<Simd<[u8; 8]>> for i32x2[src]

impl FromBits<Simd<[u8; 8]>> for u32x2[src]

impl FromBits<Simd<[u8; 8]>> for f32x2[src]

impl FromBits<Simd<[u8; 8]>> for __m64[src]
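
The FromBits conversions listed above are zero-cost bit reinterpretations: source and destination vectors have the same total width, and no lane values are converted. A minimal sketch of the pattern, assuming the packed_simd crate whose API this index appears to document (the crate name and the from_bits method are assumptions, not shown on this page):

    // Sketch only, assuming the packed_simd crate; `from_bits` is the
    // method of the FromBits trait listed above.
    use packed_simd::{f32x8, u32x8, FromBits};

    // `impl FromBits<Simd<[u32; 8]>> for f32x8`: reinterpret the 256 bits
    // of a u32x8 as eight f32 lanes without changing any bit.
    let pattern = u32x8::splat(0x3F80_0000); // bit pattern of 1.0_f32
    let ones = f32x8::from_bits(pattern);
    assert_eq!(ones, f32x8::splat(1.0));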

impl FromCast<Simd<[f32; 16]>> for i8x16[src]

impl FromCast<Simd<[f32; 16]>> for u8x16[src]

impl FromCast<Simd<[f32; 16]>> for m8x16[src]

impl FromCast<Simd<[f32; 16]>> for i16x16[src]

impl FromCast<Simd<[f32; 16]>> for u16x16[src]

impl FromCast<Simd<[f32; 16]>> for m16x16[src]

impl FromCast<Simd<[f32; 16]>> for i32x16[src]

impl FromCast<Simd<[f32; 16]>> for u32x16[src]

impl FromCast<Simd<[f32; 16]>> for m32x16[src]

impl FromCast<Simd<[f32; 2]>> for i8x2[src]

impl FromCast<Simd<[f32; 2]>> for u8x2[src]

impl FromCast<Simd<[f32; 2]>> for u64x2[src]

impl FromCast<Simd<[f32; 2]>> for f64x2[src]

impl FromCast<Simd<[f32; 2]>> for m64x2[src]

impl FromCast<Simd<[f32; 2]>> for isizex2[src]

impl FromCast<Simd<[f32; 2]>> for usizex2[src]

impl FromCast<Simd<[f32; 2]>> for msizex2[src]

impl FromCast<Simd<[f32; 2]>> for i128x2[src]

impl FromCast<Simd<[f32; 2]>> for u128x2[src]

impl FromCast<Simd<[f32; 2]>> for m128x2[src]

impl FromCast<Simd<[f32; 2]>> for m8x2[src]

impl FromCast<Simd<[f32; 2]>> for i16x2[src]

impl FromCast<Simd<[f32; 2]>> for u16x2[src]

impl FromCast<Simd<[f32; 2]>> for m16x2[src]

impl FromCast<Simd<[f32; 2]>> for i32x2[src]

impl FromCast<Simd<[f32; 2]>> for u32x2[src]

impl FromCast<Simd<[f32; 2]>> for m32x2[src]

impl FromCast<Simd<[f32; 2]>> for i64x2[src]

impl FromCast<Simd<[f32; 4]>> for i8x4[src]

impl FromCast<Simd<[f32; 4]>> for u8x4[src]

impl FromCast<Simd<[f32; 4]>> for u64x4[src]

impl FromCast<Simd<[f32; 4]>> for f64x4[src]

impl FromCast<Simd<[f32; 4]>> for m64x4[src]

impl FromCast<Simd<[f32; 4]>> for isizex4[src]

impl FromCast<Simd<[f32; 4]>> for usizex4[src]

impl FromCast<Simd<[f32; 4]>> for msizex4[src]

impl FromCast<Simd<[f32; 4]>> for i128x4[src]

impl FromCast<Simd<[f32; 4]>> for u128x4[src]

impl FromCast<Simd<[f32; 4]>> for m128x4[src]

impl FromCast<Simd<[f32; 4]>> for m8x4[src]

impl FromCast<Simd<[f32; 4]>> for i16x4[src]

impl FromCast<Simd<[f32; 4]>> for u16x4[src]

impl FromCast<Simd<[f32; 4]>> for m16x4[src]

impl FromCast<Simd<[f32; 4]>> for i32x4[src]

impl FromCast<Simd<[f32; 4]>> for u32x4[src]

impl FromCast<Simd<[f32; 4]>> for m32x4[src]

impl FromCast<Simd<[f32; 4]>> for i64x4[src]

impl FromCast<Simd<[f32; 8]>> for i8x8[src]

impl FromCast<Simd<[f32; 8]>> for u8x8[src]

impl FromCast<Simd<[f32; 8]>> for u64x8[src]

impl FromCast<Simd<[f32; 8]>> for f64x8[src]

impl FromCast<Simd<[f32; 8]>> for m64x8[src]

impl FromCast<Simd<[f32; 8]>> for isizex8[src]

impl FromCast<Simd<[f32; 8]>> for usizex8[src]

impl FromCast<Simd<[f32; 8]>> for msizex8[src]

impl FromCast<Simd<[f32; 8]>> for m8x8[src]

impl FromCast<Simd<[f32; 8]>> for i16x8[src]

impl FromCast<Simd<[f32; 8]>> for u16x8[src]

impl FromCast<Simd<[f32; 8]>> for m16x8[src]

impl FromCast<Simd<[f32; 8]>> for i32x8[src]

impl FromCast<Simd<[f32; 8]>> for u32x8[src]

impl FromCast<Simd<[f32; 8]>> for m32x8[src]

impl FromCast<Simd<[f32; 8]>> for i64x8[src]

impl FromCast<Simd<[f64; 2]>> for i8x2[src]

impl FromCast<Simd<[f64; 2]>> for u8x2[src]

impl FromCast<Simd<[f64; 2]>> for i64x2[src]

impl FromCast<Simd<[f64; 2]>> for u64x2[src]

impl FromCast<Simd<[f64; 2]>> for m64x2[src]

impl FromCast<Simd<[f64; 2]>> for isizex2[src]

impl FromCast<Simd<[f64; 2]>> for usizex2[src]

impl FromCast<Simd<[f64; 2]>> for msizex2[src]

impl FromCast<Simd<[f64; 2]>> for i128x2[src]

impl FromCast<Simd<[f64; 2]>> for u128x2[src]

impl FromCast<Simd<[f64; 2]>> for m128x2[src]

impl FromCast<Simd<[f64; 2]>> for m8x2[src]

impl FromCast<Simd<[f64; 2]>> for i16x2[src]

impl FromCast<Simd<[f64; 2]>> for u16x2[src]

impl FromCast<Simd<[f64; 2]>> for m16x2[src]

impl FromCast<Simd<[f64; 2]>> for i32x2[src]

impl FromCast<Simd<[f64; 2]>> for u32x2[src]

impl FromCast<Simd<[f64; 2]>> for f32x2[src]

impl FromCast<Simd<[f64; 2]>> for m32x2[src]

impl FromCast<Simd<[f64; 4]>> for i8x4[src]

impl FromCast<Simd<[f64; 4]>> for u8x4[src]

impl FromCast<Simd<[f64; 4]>> for i64x4[src]

impl FromCast<Simd<[f64; 4]>> for u64x4[src]

impl FromCast<Simd<[f64; 4]>> for m64x4[src]

impl FromCast<Simd<[f64; 4]>> for isizex4[src]

impl FromCast<Simd<[f64; 4]>> for usizex4[src]

impl FromCast<Simd<[f64; 4]>> for msizex4[src]

impl FromCast<Simd<[f64; 4]>> for i128x4[src]

impl FromCast<Simd<[f64; 4]>> for u128x4[src]

impl FromCast<Simd<[f64; 4]>> for m128x4[src]

impl FromCast<Simd<[f64; 4]>> for m8x4[src]

impl FromCast<Simd<[f64; 4]>> for i16x4[src]

impl FromCast<Simd<[f64; 4]>> for u16x4[src]

impl FromCast<Simd<[f64; 4]>> for m16x4[src]

impl FromCast<Simd<[f64; 4]>> for i32x4[src]

impl FromCast<Simd<[f64; 4]>> for u32x4[src]

impl FromCast<Simd<[f64; 4]>> for f32x4[src]

impl FromCast<Simd<[f64; 4]>> for m32x4[src]

impl FromCast<Simd<[f64; 8]>> for i8x8[src]

impl FromCast<Simd<[f64; 8]>> for u8x8[src]

impl FromCast<Simd<[f64; 8]>> for i64x8[src]

impl FromCast<Simd<[f64; 8]>> for u64x8[src]

impl FromCast<Simd<[f64; 8]>> for m64x8[src]

impl FromCast<Simd<[f64; 8]>> for isizex8[src]

impl FromCast<Simd<[f64; 8]>> for usizex8[src]

impl FromCast<Simd<[f64; 8]>> for msizex8[src]

impl FromCast<Simd<[f64; 8]>> for m8x8[src]

impl FromCast<Simd<[f64; 8]>> for i16x8[src]

impl FromCast<Simd<[f64; 8]>> for u16x8[src]

impl FromCast<Simd<[f64; 8]>> for m16x8[src]

impl FromCast<Simd<[f64; 8]>> for i32x8[src]

impl FromCast<Simd<[f64; 8]>> for u32x8[src]

impl FromCast<Simd<[f64; 8]>> for f32x8[src]

impl FromCast<Simd<[f64; 8]>> for m32x8[src]

impl FromCast<Simd<[i128; 1]>> for u128x1[src]

impl FromCast<Simd<[i128; 1]>> for m128x1[src]

impl FromCast<Simd<[i128; 2]>> for i8x2[src]

impl FromCast<Simd<[i128; 2]>> for u8x2[src]

impl FromCast<Simd<[i128; 2]>> for i64x2[src]

impl FromCast<Simd<[i128; 2]>> for u64x2[src]

impl FromCast<Simd<[i128; 2]>> for f64x2[src]

impl FromCast<Simd<[i128; 2]>> for m64x2[src]

impl FromCast<Simd<[i128; 2]>> for isizex2[src]

impl FromCast<Simd<[i128; 2]>> for usizex2[src]

impl FromCast<Simd<[i128; 2]>> for msizex2[src]

impl FromCast<Simd<[i128; 2]>> for u128x2[src]

impl FromCast<Simd<[i128; 2]>> for m128x2[src]

impl FromCast<Simd<[i128; 2]>> for m8x2[src]

impl FromCast<Simd<[i128; 2]>> for i16x2[src]

impl FromCast<Simd<[i128; 2]>> for u16x2[src]

impl FromCast<Simd<[i128; 2]>> for m16x2[src]

impl FromCast<Simd<[i128; 2]>> for i32x2[src]

impl FromCast<Simd<[i128; 2]>> for u32x2[src]

impl FromCast<Simd<[i128; 2]>> for f32x2[src]

impl FromCast<Simd<[i128; 2]>> for m32x2[src]

impl FromCast<Simd<[i128; 4]>> for i8x4[src]

impl FromCast<Simd<[i128; 4]>> for u8x4[src]

impl FromCast<Simd<[i128; 4]>> for i64x4[src]

impl FromCast<Simd<[i128; 4]>> for u64x4[src]

impl FromCast<Simd<[i128; 4]>> for f64x4[src]

impl FromCast<Simd<[i128; 4]>> for m64x4[src]

impl FromCast<Simd<[i128; 4]>> for isizex4[src]

impl FromCast<Simd<[i128; 4]>> for usizex4[src]

impl FromCast<Simd<[i128; 4]>> for msizex4[src]

impl FromCast<Simd<[i128; 4]>> for u128x4[src]

impl FromCast<Simd<[i128; 4]>> for m128x4[src]

impl FromCast<Simd<[i128; 4]>> for m8x4[src]

impl FromCast<Simd<[i128; 4]>> for i16x4[src]

impl FromCast<Simd<[i128; 4]>> for u16x4[src]

impl FromCast<Simd<[i128; 4]>> for m16x4[src]

impl FromCast<Simd<[i128; 4]>> for i32x4[src]

impl FromCast<Simd<[i128; 4]>> for u32x4[src]

impl FromCast<Simd<[i128; 4]>> for f32x4[src]

impl FromCast<Simd<[i128; 4]>> for m32x4[src]

impl FromCast<Simd<[i16; 16]>> for i8x16[src]

impl FromCast<Simd<[i16; 16]>> for u8x16[src]

impl FromCast<Simd<[i16; 16]>> for m8x16[src]

impl FromCast<Simd<[i16; 16]>> for u16x16[src]

impl FromCast<Simd<[i16; 16]>> for m16x16[src]

impl FromCast<Simd<[i16; 16]>> for i32x16[src]

impl FromCast<Simd<[i16; 16]>> for u32x16[src]

impl FromCast<Simd<[i16; 16]>> for f32x16[src]

impl FromCast<Simd<[i16; 16]>> for m32x16[src]

impl FromCast<Simd<[i16; 2]>> for i8x2[src]

impl FromCast<Simd<[i16; 2]>> for u8x2[src]

impl FromCast<Simd<[i16; 2]>> for u64x2[src]

impl FromCast<Simd<[i16; 2]>> for f64x2[src]

impl FromCast<Simd<[i16; 2]>> for m64x2[src]

impl FromCast<Simd<[i16; 2]>> for isizex2[src]

impl FromCast<Simd<[i16; 2]>> for usizex2[src]

impl FromCast<Simd<[i16; 2]>> for msizex2[src]

impl FromCast<Simd<[i16; 2]>> for i128x2[src]

impl FromCast<Simd<[i16; 2]>> for u128x2[src]

impl FromCast<Simd<[i16; 2]>> for m128x2[src]

impl FromCast<Simd<[i16; 2]>> for m8x2[src]

impl FromCast<Simd<[i16; 2]>> for u16x2[src]

impl FromCast<Simd<[i16; 2]>> for m16x2[src]

impl FromCast<Simd<[i16; 2]>> for i32x2[src]

impl FromCast<Simd<[i16; 2]>> for u32x2[src]

impl FromCast<Simd<[i16; 2]>> for f32x2[src]

impl FromCast<Simd<[i16; 2]>> for m32x2[src]

impl FromCast<Simd<[i16; 2]>> for i64x2[src]

impl FromCast<Simd<[i16; 32]>> for i8x32[src]

impl FromCast<Simd<[i16; 32]>> for u8x32[src]

impl FromCast<Simd<[i16; 32]>> for m8x32[src]

impl FromCast<Simd<[i16; 32]>> for u16x32[src]

impl FromCast<Simd<[i16; 32]>> for m16x32[src]

impl FromCast<Simd<[i16; 4]>> for i8x4[src]

impl FromCast<Simd<[i16; 4]>> for u8x4[src]

impl FromCast<Simd<[i16; 4]>> for u64x4[src]

impl FromCast<Simd<[i16; 4]>> for f64x4[src]

impl FromCast<Simd<[i16; 4]>> for m64x4[src]

impl FromCast<Simd<[i16; 4]>> for isizex4[src]

impl FromCast<Simd<[i16; 4]>> for usizex4[src]

impl FromCast<Simd<[i16; 4]>> for msizex4[src]

impl FromCast<Simd<[i16; 4]>> for i128x4[src]

impl FromCast<Simd<[i16; 4]>> for u128x4[src]

impl FromCast<Simd<[i16; 4]>> for m128x4[src]

impl FromCast<Simd<[i16; 4]>> for m8x4[src]

impl FromCast<Simd<[i16; 4]>> for u16x4[src]

impl FromCast<Simd<[i16; 4]>> for m16x4[src]

impl FromCast<Simd<[i16; 4]>> for i32x4[src]

impl FromCast<Simd<[i16; 4]>> for u32x4[src]

impl FromCast<Simd<[i16; 4]>> for f32x4[src]

impl FromCast<Simd<[i16; 4]>> for m32x4[src]

impl FromCast<Simd<[i16; 4]>> for i64x4[src]

impl FromCast<Simd<[i16; 8]>> for i8x8[src]

impl FromCast<Simd<[i16; 8]>> for u8x8[src]

impl FromCast<Simd<[i16; 8]>> for u64x8[src]

impl FromCast<Simd<[i16; 8]>> for f64x8[src]

impl FromCast<Simd<[i16; 8]>> for m64x8[src]

impl FromCast<Simd<[i16; 8]>> for isizex8[src]

impl FromCast<Simd<[i16; 8]>> for usizex8[src]

impl FromCast<Simd<[i16; 8]>> for msizex8[src]

impl FromCast<Simd<[i16; 8]>> for m8x8[src]

impl FromCast<Simd<[i16; 8]>> for u16x8[src]

impl FromCast<Simd<[i16; 8]>> for m16x8[src]

impl FromCast<Simd<[i16; 8]>> for i32x8[src]

impl FromCast<Simd<[i16; 8]>> for u32x8[src]

impl FromCast<Simd<[i16; 8]>> for f32x8[src]

impl FromCast<Simd<[i16; 8]>> for m32x8[src]

impl FromCast<Simd<[i16; 8]>> for i64x8[src]

impl FromCast<Simd<[i32; 16]>> for i8x16[src]

impl FromCast<Simd<[i32; 16]>> for u8x16[src]

impl FromCast<Simd<[i32; 16]>> for m8x16[src]

impl FromCast<Simd<[i32; 16]>> for i16x16[src]

impl FromCast<Simd<[i32; 16]>> for u16x16[src]

impl FromCast<Simd<[i32; 16]>> for m16x16[src]

impl FromCast<Simd<[i32; 16]>> for u32x16[src]

impl FromCast<Simd<[i32; 16]>> for f32x16[src]

impl FromCast<Simd<[i32; 16]>> for m32x16[src]

impl FromCast<Simd<[i32; 2]>> for i8x2[src]

impl FromCast<Simd<[i32; 2]>> for u8x2[src]

impl FromCast<Simd<[i32; 2]>> for u64x2[src]

impl FromCast<Simd<[i32; 2]>> for f64x2[src]

impl FromCast<Simd<[i32; 2]>> for m64x2[src]

impl FromCast<Simd<[i32; 2]>> for isizex2[src]

impl FromCast<Simd<[i32; 2]>> for usizex2[src]

impl FromCast<Simd<[i32; 2]>> for msizex2[src]

impl FromCast<Simd<[i32; 2]>> for i128x2[src]

impl FromCast<Simd<[i32; 2]>> for u128x2[src]

impl FromCast<Simd<[i32; 2]>> for m128x2[src]

impl FromCast<Simd<[i32; 2]>> for m8x2[src]

impl FromCast<Simd<[i32; 2]>> for i16x2[src]

impl FromCast<Simd<[i32; 2]>> for u16x2[src]

impl FromCast<Simd<[i32; 2]>> for m16x2[src]

impl FromCast<Simd<[i32; 2]>> for u32x2[src]

impl FromCast<Simd<[i32; 2]>> for f32x2[src]

impl FromCast<Simd<[i32; 2]>> for m32x2[src]

impl FromCast<Simd<[i32; 2]>> for i64x2[src]

impl FromCast<Simd<[i32; 4]>> for i8x4[src]

impl FromCast<Simd<[i32; 4]>> for u8x4[src]

impl FromCast<Simd<[i32; 4]>> for u64x4[src]

impl FromCast<Simd<[i32; 4]>> for f64x4[src]

impl FromCast<Simd<[i32; 4]>> for m64x4[src]

impl FromCast<Simd<[i32; 4]>> for isizex4[src]

impl FromCast<Simd<[i32; 4]>> for usizex4[src]

impl FromCast<Simd<[i32; 4]>> for msizex4[src]

impl FromCast<Simd<[i32; 4]>> for i128x4[src]

impl FromCast<Simd<[i32; 4]>> for u128x4[src]

impl FromCast<Simd<[i32; 4]>> for m128x4[src]

impl FromCast<Simd<[i32; 4]>> for m8x4[src]

impl FromCast<Simd<[i32; 4]>> for i16x4[src]

impl FromCast<Simd<[i32; 4]>> for u16x4[src]

impl FromCast<Simd<[i32; 4]>> for m16x4[src]

impl FromCast<Simd<[i32; 4]>> for u32x4[src]

impl FromCast<Simd<[i32; 4]>> for f32x4[src]

impl FromCast<Simd<[i32; 4]>> for m32x4[src]

impl FromCast<Simd<[i32; 4]>> for i64x4[src]

impl FromCast<Simd<[i32; 8]>> for i8x8[src]

impl FromCast<Simd<[i32; 8]>> for u8x8[src]

impl FromCast<Simd<[i32; 8]>> for u64x8[src]

impl FromCast<Simd<[i32; 8]>> for f64x8[src]

impl FromCast<Simd<[i32; 8]>> for m64x8[src]

impl FromCast<Simd<[i32; 8]>> for isizex8[src]

impl FromCast<Simd<[i32; 8]>> for usizex8[src]

impl FromCast<Simd<[i32; 8]>> for msizex8[src]

impl FromCast<Simd<[i32; 8]>> for m8x8[src]

impl FromCast<Simd<[i32; 8]>> for i16x8[src]

impl FromCast<Simd<[i32; 8]>> for u16x8[src]

impl FromCast<Simd<[i32; 8]>> for m16x8[src]

impl FromCast<Simd<[i32; 8]>> for u32x8[src]

impl FromCast<Simd<[i32; 8]>> for f32x8[src]

impl FromCast<Simd<[i32; 8]>> for m32x8[src]

impl FromCast<Simd<[i32; 8]>> for i64x8[src]

impl FromCast<Simd<[i64; 2]>> for i8x2[src]

impl FromCast<Simd<[i64; 2]>> for u8x2[src]

impl FromCast<Simd<[i64; 2]>> for u64x2[src]

impl FromCast<Simd<[i64; 2]>> for f64x2[src]

impl FromCast<Simd<[i64; 2]>> for m64x2[src]

impl FromCast<Simd<[i64; 2]>> for isizex2[src]

impl FromCast<Simd<[i64; 2]>> for usizex2[src]

impl FromCast<Simd<[i64; 2]>> for msizex2[src]

impl FromCast<Simd<[i64; 2]>> for i128x2[src]

impl FromCast<Simd<[i64; 2]>> for u128x2[src]

impl FromCast<Simd<[i64; 2]>> for m128x2[src]

impl FromCast<Simd<[i64; 2]>> for m8x2[src]

impl FromCast<Simd<[i64; 2]>> for i16x2[src]

impl FromCast<Simd<[i64; 2]>> for u16x2[src]

impl FromCast<Simd<[i64; 2]>> for m16x2[src]

impl FromCast<Simd<[i64; 2]>> for i32x2[src]

impl FromCast<Simd<[i64; 2]>> for u32x2[src]

impl FromCast<Simd<[i64; 2]>> for f32x2[src]

impl FromCast<Simd<[i64; 2]>> for m32x2[src]

impl FromCast<Simd<[i64; 4]>> for i8x4[src]

impl FromCast<Simd<[i64; 4]>> for u8x4[src]

impl FromCast<Simd<[i64; 4]>> for u64x4[src]

impl FromCast<Simd<[i64; 4]>> for f64x4[src]

impl FromCast<Simd<[i64; 4]>> for m64x4[src]

impl FromCast<Simd<[i64; 4]>> for isizex4[src]

impl FromCast<Simd<[i64; 4]>> for usizex4[src]

impl FromCast<Simd<[i64; 4]>> for msizex4[src]

impl FromCast<Simd<[i64; 4]>> for i128x4[src]

impl FromCast<Simd<[i64; 4]>> for u128x4[src]

impl FromCast<Simd<[i64; 4]>> for m128x4[src]

impl FromCast<Simd<[i64; 4]>> for m8x4[src]

impl FromCast<Simd<[i64; 4]>> for i16x4[src]

impl FromCast<Simd<[i64; 4]>> for u16x4[src]

impl FromCast<Simd<[i64; 4]>> for m16x4[src]

impl FromCast<Simd<[i64; 4]>> for i32x4[src]

impl FromCast<Simd<[i64; 4]>> for u32x4[src]

impl FromCast<Simd<[i64; 4]>> for f32x4[src]

impl FromCast<Simd<[i64; 4]>> for m32x4[src]

impl FromCast<Simd<[i64; 8]>> for i8x8[src]

impl FromCast<Simd<[i64; 8]>> for u8x8[src]

impl FromCast<Simd<[i64; 8]>> for u64x8[src]

impl FromCast<Simd<[i64; 8]>> for f64x8[src]

impl FromCast<Simd<[i64; 8]>> for m64x8[src]

impl FromCast<Simd<[i64; 8]>> for isizex8[src]

impl FromCast<Simd<[i64; 8]>> for usizex8[src]

impl FromCast<Simd<[i64; 8]>> for msizex8[src]

impl FromCast<Simd<[i64; 8]>> for m8x8[src]

impl FromCast<Simd<[i64; 8]>> for i16x8[src]

impl FromCast<Simd<[i64; 8]>> for u16x8[src]

impl FromCast<Simd<[i64; 8]>> for m16x8[src]

impl FromCast<Simd<[i64; 8]>> for i32x8[src]

impl FromCast<Simd<[i64; 8]>> for u32x8[src]

impl FromCast<Simd<[i64; 8]>> for f32x8[src]

impl FromCast<Simd<[i64; 8]>> for m32x8[src]

impl FromCast<Simd<[i8; 16]>> for u8x16[src]

impl FromCast<Simd<[i8; 16]>> for m8x16[src]

impl FromCast<Simd<[i8; 16]>> for i16x16[src]

impl FromCast<Simd<[i8; 16]>> for u16x16[src]

impl FromCast<Simd<[i8; 16]>> for m16x16[src]

impl FromCast<Simd<[i8; 16]>> for i32x16[src]

impl FromCast<Simd<[i8; 16]>> for u32x16[src]

impl FromCast<Simd<[i8; 16]>> for f32x16[src]

impl FromCast<Simd<[i8; 16]>> for m32x16[src]

impl FromCast<Simd<[i8; 2]>> for u8x2[src]

impl FromCast<Simd<[i8; 2]>> for m8x2[src]

impl FromCast<Simd<[i8; 2]>> for u64x2[src]

impl FromCast<Simd<[i8; 2]>> for f64x2[src]

impl FromCast<Simd<[i8; 2]>> for m64x2[src]

impl FromCast<Simd<[i8; 2]>> for isizex2[src]

impl FromCast<Simd<[i8; 2]>> for usizex2[src]

impl FromCast<Simd<[i8; 2]>> for msizex2[src]

impl FromCast<Simd<[i8; 2]>> for i128x2[src]

impl FromCast<Simd<[i8; 2]>> for u128x2[src]

impl FromCast<Simd<[i8; 2]>> for m128x2[src]

impl FromCast<Simd<[i8; 2]>> for i16x2[src]

impl FromCast<Simd<[i8; 2]>> for u16x2[src]

impl FromCast<Simd<[i8; 2]>> for m16x2[src]

impl FromCast<Simd<[i8; 2]>> for i32x2[src]

impl FromCast<Simd<[i8; 2]>> for u32x2[src]

impl FromCast<Simd<[i8; 2]>> for f32x2[src]

impl FromCast<Simd<[i8; 2]>> for m32x2[src]

impl FromCast<Simd<[i8; 2]>> for i64x2[src]

impl FromCast<Simd<[i8; 32]>> for u8x32[src]

impl FromCast<Simd<[i8; 32]>> for m8x32[src]

impl FromCast<Simd<[i8; 32]>> for i16x32[src]

impl FromCast<Simd<[i8; 32]>> for u16x32[src]

impl FromCast<Simd<[i8; 32]>> for m16x32[src]

impl FromCast<Simd<[i8; 4]>> for u8x4[src]

impl FromCast<Simd<[i8; 4]>> for m8x4[src]

impl FromCast<Simd<[i8; 4]>> for u64x4[src]

impl FromCast<Simd<[i8; 4]>> for f64x4[src]

impl FromCast<Simd<[i8; 4]>> for m64x4[src]

impl FromCast<Simd<[i8; 4]>> for isizex4[src]

impl FromCast<Simd<[i8; 4]>> for usizex4[src]

impl FromCast<Simd<[i8; 4]>> for msizex4[src]

impl FromCast<Simd<[i8; 4]>> for i128x4[src]

impl FromCast<Simd<[i8; 4]>> for u128x4[src]

impl FromCast<Simd<[i8; 4]>> for m128x4[src]

impl FromCast<Simd<[i8; 4]>> for i16x4[src]

impl FromCast<Simd<[i8; 4]>> for u16x4[src]

impl FromCast<Simd<[i8; 4]>> for m16x4[src]

impl FromCast<Simd<[i8; 4]>> for i32x4[src]

impl FromCast<Simd<[i8; 4]>> for u32x4[src]

impl FromCast<Simd<[i8; 4]>> for f32x4[src]

impl FromCast<Simd<[i8; 4]>> for m32x4[src]

impl FromCast<Simd<[i8; 4]>> for i64x4[src]

impl FromCast<Simd<[i8; 64]>> for u8x64[src]

impl FromCast<Simd<[i8; 64]>> for m8x64[src]

impl FromCast<Simd<[i8; 8]>> for u8x8[src]

impl FromCast<Simd<[i8; 8]>> for m8x8[src]

impl FromCast<Simd<[i8; 8]>> for u64x8[src]

impl FromCast<Simd<[i8; 8]>> for f64x8[src]

impl FromCast<Simd<[i8; 8]>> for m64x8[src]

impl FromCast<Simd<[i8; 8]>> for isizex8[src]

impl FromCast<Simd<[i8; 8]>> for usizex8[src]

impl FromCast<Simd<[i8; 8]>> for msizex8[src]

impl FromCast<Simd<[i8; 8]>> for i16x8[src]

impl FromCast<Simd<[i8; 8]>> for u16x8[src]

impl FromCast<Simd<[i8; 8]>> for m16x8[src]

impl FromCast<Simd<[i8; 8]>> for i32x8[src]

impl FromCast<Simd<[i8; 8]>> for u32x8[src]

impl FromCast<Simd<[i8; 8]>> for f32x8[src]

impl FromCast<Simd<[i8; 8]>> for m32x8[src]

impl FromCast<Simd<[i8; 8]>> for i64x8[src]

impl FromCast<Simd<[isize; 2]>> for i8x2[src]

impl FromCast<Simd<[isize; 2]>> for u8x2[src]

impl FromCast<Simd<[isize; 2]>> for i64x2[src]

impl FromCast<Simd<[isize; 2]>> for u64x2[src]

impl FromCast<Simd<[isize; 2]>> for f64x2[src]

impl FromCast<Simd<[isize; 2]>> for m64x2[src]

impl FromCast<Simd<[isize; 2]>> for usizex2[src]

impl FromCast<Simd<[isize; 2]>> for msizex2[src]

impl FromCast<Simd<[isize; 2]>> for i128x2[src]

impl FromCast<Simd<[isize; 2]>> for u128x2[src]

impl FromCast<Simd<[isize; 2]>> for m128x2[src]

impl FromCast<Simd<[isize; 2]>> for m8x2[src]

impl FromCast<Simd<[isize; 2]>> for i16x2[src]

impl FromCast<Simd<[isize; 2]>> for u16x2[src]

impl FromCast<Simd<[isize; 2]>> for m16x2[src]

impl FromCast<Simd<[isize; 2]>> for i32x2[src]

impl FromCast<Simd<[isize; 2]>> for u32x2[src]

impl FromCast<Simd<[isize; 2]>> for f32x2[src]

impl FromCast<Simd<[isize; 2]>> for m32x2[src]

impl FromCast<Simd<[isize; 4]>> for i8x4[src]

impl FromCast<Simd<[isize; 4]>> for u8x4[src]

impl FromCast<Simd<[isize; 4]>> for i64x4[src]

impl FromCast<Simd<[isize; 4]>> for u64x4[src]

impl FromCast<Simd<[isize; 4]>> for f64x4[src]

impl FromCast<Simd<[isize; 4]>> for m64x4[src]

impl FromCast<Simd<[isize; 4]>> for usizex4[src]

impl FromCast<Simd<[isize; 4]>> for msizex4[src]

impl FromCast<Simd<[isize; 4]>> for i128x4[src]

impl FromCast<Simd<[isize; 4]>> for u128x4[src]

impl FromCast<Simd<[isize; 4]>> for m128x4[src]

impl FromCast<Simd<[isize; 4]>> for m8x4[src]

impl FromCast<Simd<[isize; 4]>> for i16x4[src]

impl FromCast<Simd<[isize; 4]>> for u16x4[src]

impl FromCast<Simd<[isize; 4]>> for m16x4[src]

impl FromCast<Simd<[isize; 4]>> for i32x4[src]

impl FromCast<Simd<[isize; 4]>> for u32x4[src]

impl FromCast<Simd<[isize; 4]>> for f32x4[src]

impl FromCast<Simd<[isize; 4]>> for m32x4[src]

impl FromCast<Simd<[isize; 8]>> for i8x8[src]

impl FromCast<Simd<[isize; 8]>> for u8x8[src]

impl FromCast<Simd<[isize; 8]>> for i64x8[src]

impl FromCast<Simd<[isize; 8]>> for u64x8[src]

impl FromCast<Simd<[isize; 8]>> for f64x8[src]

impl FromCast<Simd<[isize; 8]>> for m64x8[src]

impl FromCast<Simd<[isize; 8]>> for usizex8[src]

impl FromCast<Simd<[isize; 8]>> for msizex8[src]

impl FromCast<Simd<[isize; 8]>> for m8x8[src]

impl FromCast<Simd<[isize; 8]>> for i16x8[src]

impl FromCast<Simd<[isize; 8]>> for u16x8[src]

impl FromCast<Simd<[isize; 8]>> for m16x8[src]

impl FromCast<Simd<[isize; 8]>> for i32x8[src]

impl FromCast<Simd<[isize; 8]>> for u32x8[src]

impl FromCast<Simd<[isize; 8]>> for f32x8[src]

impl FromCast<Simd<[isize; 8]>> for m32x8[src]

impl FromCast<Simd<[m128; 1]>> for i128x1[src]

impl FromCast<Simd<[m128; 1]>> for u128x1[src]

impl FromCast<Simd<[m128; 2]>> for i8x2[src]

impl FromCast<Simd<[m128; 2]>> for u8x2[src]

impl FromCast<Simd<[m128; 2]>> for i64x2[src]

impl FromCast<Simd<[m128; 2]>> for u64x2[src]

impl FromCast<Simd<[m128; 2]>> for f64x2[src]

impl FromCast<Simd<[m128; 2]>> for m64x2[src]

impl FromCast<Simd<[m128; 2]>> for isizex2[src]

impl FromCast<Simd<[m128; 2]>> for usizex2[src]

impl FromCast<Simd<[m128; 2]>> for msizex2[src]

impl FromCast<Simd<[m128; 2]>> for i128x2[src]

impl FromCast<Simd<[m128; 2]>> for u128x2[src]

impl FromCast<Simd<[m128; 2]>> for m8x2[src]

impl FromCast<Simd<[m128; 2]>> for i16x2[src]

impl FromCast<Simd<[m128; 2]>> for u16x2[src]

impl FromCast<Simd<[m128; 2]>> for m16x2[src]

impl FromCast<Simd<[m128; 2]>> for i32x2[src]

impl FromCast<Simd<[m128; 2]>> for u32x2[src]

impl FromCast<Simd<[m128; 2]>> for f32x2[src]

impl FromCast<Simd<[m128; 2]>> for m32x2[src]

impl FromCast<Simd<[m128; 4]>> for i8x4[src]

impl FromCast<Simd<[m128; 4]>> for u8x4[src]

impl FromCast<Simd<[m128; 4]>> for i64x4[src]

impl FromCast<Simd<[m128; 4]>> for u64x4[src]

impl FromCast<Simd<[m128; 4]>> for f64x4[src]

impl FromCast<Simd<[m128; 4]>> for m64x4[src]

impl FromCast<Simd<[m128; 4]>> for isizex4[src]

impl FromCast<Simd<[m128; 4]>> for usizex4[src]

impl FromCast<Simd<[m128; 4]>> for msizex4[src]

impl FromCast<Simd<[m128; 4]>> for i128x4[src]

impl FromCast<Simd<[m128; 4]>> for u128x4[src]

impl FromCast<Simd<[m128; 4]>> for m8x4[src]

impl FromCast<Simd<[m128; 4]>> for i16x4[src]

impl FromCast<Simd<[m128; 4]>> for u16x4[src]

impl FromCast<Simd<[m128; 4]>> for m16x4[src]

impl FromCast<Simd<[m128; 4]>> for i32x4[src]

impl FromCast<Simd<[m128; 4]>> for u32x4[src]

impl FromCast<Simd<[m128; 4]>> for f32x4[src]

impl FromCast<Simd<[m128; 4]>> for m32x4[src]

impl FromCast<Simd<[m16; 16]>> for i8x16[src]

impl FromCast<Simd<[m16; 16]>> for u8x16[src]

impl FromCast<Simd<[m16; 16]>> for m8x16[src]

impl FromCast<Simd<[m16; 16]>> for i16x16[src]

impl FromCast<Simd<[m16; 16]>> for u16x16[src]

impl FromCast<Simd<[m16; 16]>> for i32x16[src]

impl FromCast<Simd<[m16; 16]>> for u32x16[src]

impl FromCast<Simd<[m16; 16]>> for f32x16[src]

impl FromCast<Simd<[m16; 16]>> for m32x16[src]

impl FromCast<Simd<[m16; 2]>> for i8x2[src]

impl FromCast<Simd<[m16; 2]>> for u8x2[src]

impl FromCast<Simd<[m16; 2]>> for u64x2[src]

impl FromCast<Simd<[m16; 2]>> for f64x2[src]

impl FromCast<Simd<[m16; 2]>> for m64x2[src]

impl FromCast<Simd<[m16; 2]>> for isizex2[src]

impl FromCast<Simd<[m16; 2]>> for usizex2[src]

impl FromCast<Simd<[m16; 2]>> for msizex2[src]

impl FromCast<Simd<[m16; 2]>> for i128x2[src]

impl FromCast<Simd<[m16; 2]>> for u128x2[src]

impl FromCast<Simd<[m16; 2]>> for m128x2[src]

impl FromCast<Simd<[m16; 2]>> for m8x2[src]

impl FromCast<Simd<[m16; 2]>> for i16x2[src]

impl FromCast<Simd<[m16; 2]>> for u16x2[src]

impl FromCast<Simd<[m16; 2]>> for i32x2[src]

impl FromCast<Simd<[m16; 2]>> for u32x2[src]

impl FromCast<Simd<[m16; 2]>> for f32x2[src]

impl FromCast<Simd<[m16; 2]>> for m32x2[src]

impl FromCast<Simd<[m16; 2]>> for i64x2[src]

impl FromCast<Simd<[m16; 32]>> for i8x32[src]

impl FromCast<Simd<[m16; 32]>> for u8x32[src]

impl FromCast<Simd<[m16; 32]>> for m8x32[src]

impl FromCast<Simd<[m16; 32]>> for i16x32[src]

impl FromCast<Simd<[m16; 32]>> for u16x32[src]

impl FromCast<Simd<[m16; 4]>> for i8x4[src]

impl FromCast<Simd<[m16; 4]>> for u8x4[src]

impl FromCast<Simd<[m16; 4]>> for u64x4[src]

impl FromCast<Simd<[m16; 4]>> for f64x4[src]

impl FromCast<Simd<[m16; 4]>> for m64x4[src]

impl FromCast<Simd<[m16; 4]>> for isizex4[src]

impl FromCast<Simd<[m16; 4]>> for usizex4[src]

impl FromCast<Simd<[m16; 4]>> for msizex4[src]

impl FromCast<Simd<[m16; 4]>> for i128x4[src]

impl FromCast<Simd<[m16; 4]>> for u128x4[src]

impl FromCast<Simd<[m16; 4]>> for m128x4[src]

impl FromCast<Simd<[m16; 4]>> for m8x4[src]

impl FromCast<Simd<[m16; 4]>> for i16x4[src]

impl FromCast<Simd<[m16; 4]>> for u16x4[src]

impl FromCast<Simd<[m16; 4]>> for i32x4[src]

impl FromCast<Simd<[m16; 4]>> for u32x4[src]

impl FromCast<Simd<[m16; 4]>> for f32x4[src]

impl FromCast<Simd<[m16; 4]>> for m32x4[src]

impl FromCast<Simd<[m16; 4]>> for i64x4[src]

impl FromCast<Simd<[m16; 8]>> for i8x8[src]

impl FromCast<Simd<[m16; 8]>> for u8x8[src]

impl FromCast<Simd<[m16; 8]>> for u64x8[src]

impl FromCast<Simd<[m16; 8]>> for f64x8[src]

impl FromCast<Simd<[m16; 8]>> for m64x8[src]

impl FromCast<Simd<[m16; 8]>> for isizex8[src]

impl FromCast<Simd<[m16; 8]>> for usizex8[src]

impl FromCast<Simd<[m16; 8]>> for msizex8[src]

impl FromCast<Simd<[m16; 8]>> for m8x8[src]

impl FromCast<Simd<[m16; 8]>> for i16x8[src]

impl FromCast<Simd<[m16; 8]>> for u16x8[src]

impl FromCast<Simd<[m16; 8]>> for i32x8[src]

impl FromCast<Simd<[m16; 8]>> for u32x8[src]

impl FromCast<Simd<[m16; 8]>> for f32x8[src]

impl FromCast<Simd<[m16; 8]>> for m32x8[src]

impl FromCast<Simd<[m16; 8]>> for i64x8[src]

impl FromCast<Simd<[m32; 16]>> for i8x16[src]

impl FromCast<Simd<[m32; 16]>> for u8x16[src]

impl FromCast<Simd<[m32; 16]>> for m8x16[src]

impl FromCast<Simd<[m32; 16]>> for i16x16[src]

impl FromCast<Simd<[m32; 16]>> for u16x16[src]

impl FromCast<Simd<[m32; 16]>> for m16x16[src]

impl FromCast<Simd<[m32; 16]>> for i32x16[src]

impl FromCast<Simd<[m32; 16]>> for u32x16[src]

impl FromCast<Simd<[m32; 16]>> for f32x16[src]

impl FromCast<Simd<[m32; 2]>> for i8x2[src]

impl FromCast<Simd<[m32; 2]>> for u8x2[src]

impl FromCast<Simd<[m32; 2]>> for u64x2[src]

impl FromCast<Simd<[m32; 2]>> for f64x2[src]

impl FromCast<Simd<[m32; 2]>> for m64x2[src]

impl FromCast<Simd<[m32; 2]>> for isizex2[src]

impl FromCast<Simd<[m32; 2]>> for usizex2[src]

impl FromCast<Simd<[m32; 2]>> for msizex2[src]

impl FromCast<Simd<[m32; 2]>> for i128x2[src]

impl FromCast<Simd<[m32; 2]>> for u128x2[src]

impl FromCast<Simd<[m32; 2]>> for m128x2[src]

impl FromCast<Simd<[m32; 2]>> for m8x2[src]

impl FromCast<Simd<[m32; 2]>> for i16x2[src]

impl FromCast<Simd<[m32; 2]>> for u16x2[src]

impl FromCast<Simd<[m32; 2]>> for m16x2[src]

impl FromCast<Simd<[m32; 2]>> for i32x2[src]

impl FromCast<Simd<[m32; 2]>> for u32x2[src]

impl FromCast<Simd<[m32; 2]>> for f32x2[src]

impl FromCast<Simd<[m32; 2]>> for i64x2[src]

impl FromCast<Simd<[m32; 4]>> for i8x4[src]

impl FromCast<Simd<[m32; 4]>> for u8x4[src]

impl FromCast<Simd<[m32; 4]>> for u64x4[src]

impl FromCast<Simd<[m32; 4]>> for f64x4[src]

impl FromCast<Simd<[m32; 4]>> for m64x4[src]

impl FromCast<Simd<[m32; 4]>> for isizex4[src]

impl FromCast<Simd<[m32; 4]>> for usizex4[src]

impl FromCast<Simd<[m32; 4]>> for msizex4[src]

impl FromCast<Simd<[m32; 4]>> for i128x4[src]

impl FromCast<Simd<[m32; 4]>> for u128x4[src]

impl FromCast<Simd<[m32; 4]>> for m128x4[src]

impl FromCast<Simd<[m32; 4]>> for m8x4[src]

impl FromCast<Simd<[m32; 4]>> for i16x4[src]

impl FromCast<Simd<[m32; 4]>> for u16x4[src]

impl FromCast<Simd<[m32; 4]>> for m16x4[src]

impl FromCast<Simd<[m32; 4]>> for i32x4[src]

impl FromCast<Simd<[m32; 4]>> for u32x4[src]

impl FromCast<Simd<[m32; 4]>> for f32x4[src]

impl FromCast<Simd<[m32; 4]>> for i64x4[src]

impl FromCast<Simd<[m32; 8]>> for i8x8[src]

impl FromCast<Simd<[m32; 8]>> for u8x8[src]

impl FromCast<Simd<[m32; 8]>> for u64x8[src]

impl FromCast<Simd<[m32; 8]>> for f64x8[src]

impl FromCast<Simd<[m32; 8]>> for m64x8[src]

impl FromCast<Simd<[m32; 8]>> for isizex8[src]

impl FromCast<Simd<[m32; 8]>> for usizex8[src]

impl FromCast<Simd<[m32; 8]>> for msizex8[src]

impl FromCast<Simd<[m32; 8]>> for m8x8[src]

impl FromCast<Simd<[m32; 8]>> for i16x8[src]

impl FromCast<Simd<[m32; 8]>> for u16x8[src]

impl FromCast<Simd<[m32; 8]>> for m16x8[src]

impl FromCast<Simd<[m32; 8]>> for i32x8[src]

impl FromCast<Simd<[m32; 8]>> for u32x8[src]

impl FromCast<Simd<[m32; 8]>> for f32x8[src]

impl FromCast<Simd<[m32; 8]>> for i64x8[src]

impl FromCast<Simd<[m64; 2]>> for i8x2[src]

impl FromCast<Simd<[m64; 2]>> for u8x2[src]

impl FromCast<Simd<[m64; 2]>> for i64x2[src]

impl FromCast<Simd<[m64; 2]>> for u64x2[src]

impl FromCast<Simd<[m64; 2]>> for f64x2[src]

impl FromCast<Simd<[m64; 2]>> for isizex2[src]

impl FromCast<Simd<[m64; 2]>> for usizex2[src]

impl FromCast<Simd<[m64; 2]>> for msizex2[src]

impl FromCast<Simd<[m64; 2]>> for i128x2[src]

impl FromCast<Simd<[m64; 2]>> for u128x2[src]

impl FromCast<Simd<[m64; 2]>> for m128x2[src]

impl FromCast<Simd<[m64; 2]>> for m8x2[src]

impl FromCast<Simd<[m64; 2]>> for i16x2[src]

impl FromCast<Simd<[m64; 2]>> for u16x2[src]

impl FromCast<Simd<[m64; 2]>> for m16x2[src]

impl FromCast<Simd<[m64; 2]>> for i32x2[src]

impl FromCast<Simd<[m64; 2]>> for u32x2[src]

impl FromCast<Simd<[m64; 2]>> for f32x2[src]

impl FromCast<Simd<[m64; 2]>> for m32x2[src]

impl FromCast<Simd<[m64; 4]>> for i8x4[src]

impl FromCast<Simd<[m64; 4]>> for u8x4[src]

impl FromCast<Simd<[m64; 4]>> for i64x4[src]

impl FromCast<Simd<[m64; 4]>> for u64x4[src]

impl FromCast<Simd<[m64; 4]>> for f64x4[src]

impl FromCast<Simd<[m64; 4]>> for isizex4[src]

impl FromCast<Simd<[m64; 4]>> for usizex4[src]

impl FromCast<Simd<[m64; 4]>> for msizex4[src]

impl FromCast<Simd<[m64; 4]>> for i128x4[src]

impl FromCast<Simd<[m64; 4]>> for u128x4[src]

impl FromCast<Simd<[m64; 4]>> for m128x4[src]

impl FromCast<Simd<[m64; 4]>> for m8x4[src]

impl FromCast<Simd<[m64; 4]>> for i16x4[src]

impl FromCast<Simd<[m64; 4]>> for u16x4[src]

impl FromCast<Simd<[m64; 4]>> for m16x4[src]

impl FromCast<Simd<[m64; 4]>> for i32x4[src]

impl FromCast<Simd<[m64; 4]>> for u32x4[src]

impl FromCast<Simd<[m64; 4]>> for f32x4[src]

impl FromCast<Simd<[m64; 4]>> for m32x4[src]

impl FromCast<Simd<[m64; 8]>> for i8x8[src]

impl FromCast<Simd<[m64; 8]>> for u8x8[src]

impl FromCast<Simd<[m64; 8]>> for i64x8[src]

impl FromCast<Simd<[m64; 8]>> for u64x8[src]

impl FromCast<Simd<[m64; 8]>> for f64x8[src]

impl FromCast<Simd<[m64; 8]>> for isizex8[src]

impl FromCast<Simd<[m64; 8]>> for usizex8[src]

impl FromCast<Simd<[m64; 8]>> for msizex8[src]

impl FromCast<Simd<[m64; 8]>> for m8x8[src]

impl FromCast<Simd<[m64; 8]>> for i16x8[src]

impl FromCast<Simd<[m64; 8]>> for u16x8[src]

impl FromCast<Simd<[m64; 8]>> for m16x8[src]

impl FromCast<Simd<[m64; 8]>> for i32x8[src]

impl FromCast<Simd<[m64; 8]>> for u32x8[src]

impl FromCast<Simd<[m64; 8]>> for f32x8[src]

impl FromCast<Simd<[m64; 8]>> for m32x8[src]

impl FromCast<Simd<[m8; 16]>> for i8x16[src]

impl FromCast<Simd<[m8; 16]>> for u8x16[src]

impl FromCast<Simd<[m8; 16]>> for i16x16[src]

impl FromCast<Simd<[m8; 16]>> for u16x16[src]

impl FromCast<Simd<[m8; 16]>> for m16x16[src]

impl FromCast<Simd<[m8; 16]>> for i32x16[src]

impl FromCast<Simd<[m8; 16]>> for u32x16[src]

impl FromCast<Simd<[m8; 16]>> for f32x16[src]

impl FromCast<Simd<[m8; 16]>> for m32x16[src]

impl FromCast<Simd<[m8; 2]>> for i8x2[src]

impl FromCast<Simd<[m8; 2]>> for u8x2[src]

impl FromCast<Simd<[m8; 2]>> for u64x2[src]

impl FromCast<Simd<[m8; 2]>> for f64x2[src]

impl FromCast<Simd<[m8; 2]>> for m64x2[src]

impl FromCast<Simd<[m8; 2]>> for isizex2[src]

impl FromCast<Simd<[m8; 2]>> for usizex2[src]

impl FromCast<Simd<[m8; 2]>> for msizex2[src]

impl FromCast<Simd<[m8; 2]>> for i128x2[src]

impl FromCast<Simd<[m8; 2]>> for u128x2[src]

impl FromCast<Simd<[m8; 2]>> for m128x2[src]

impl FromCast<Simd<[m8; 2]>> for i16x2[src]

impl FromCast<Simd<[m8; 2]>> for u16x2[src]

impl FromCast<Simd<[m8; 2]>> for m16x2[src]

impl FromCast<Simd<[m8; 2]>> for i32x2[src]

impl FromCast<Simd<[m8; 2]>> for u32x2[src]

impl FromCast<Simd<[m8; 2]>> for f32x2[src]

impl FromCast<Simd<[m8; 2]>> for m32x2[src]

impl FromCast<Simd<[m8; 2]>> for i64x2[src]

impl FromCast<Simd<[m8; 32]>> for i8x32[src]

impl FromCast<Simd<[m8; 32]>> for u8x32[src]

impl FromCast<Simd<[m8; 32]>> for i16x32[src]

impl FromCast<Simd<[m8; 32]>> for u16x32[src]

impl FromCast<Simd<[m8; 32]>> for m16x32[src]

impl FromCast<Simd<[m8; 4]>> for i8x4[src]

impl FromCast<Simd<[m8; 4]>> for u8x4[src]

impl FromCast<Simd<[m8; 4]>> for u64x4[src]

impl FromCast<Simd<[m8; 4]>> for f64x4[src]

impl FromCast<Simd<[m8; 4]>> for m64x4[src]

impl FromCast<Simd<[m8; 4]>> for isizex4[src]

impl FromCast<Simd<[m8; 4]>> for usizex4[src]

impl FromCast<Simd<[m8; 4]>> for msizex4[src]

impl FromCast<Simd<[m8; 4]>> for i128x4[src]

impl FromCast<Simd<[m8; 4]>> for u128x4[src]

impl FromCast<Simd<[m8; 4]>> for m128x4[src]

impl FromCast<Simd<[m8; 4]>> for i16x4[src]

impl FromCast<Simd<[m8; 4]>> for u16x4[src]

impl FromCast<Simd<[m8; 4]>> for m16x4[src]

impl FromCast<Simd<[m8; 4]>> for i32x4[src]

impl FromCast<Simd<[m8; 4]>> for u32x4[src]

impl FromCast<Simd<[m8; 4]>> for f32x4[src]

impl FromCast<Simd<[m8; 4]>> for m32x4[src]

impl FromCast<Simd<[m8; 4]>> for i64x4[src]

impl FromCast<Simd<[m8; 64]>> for i8x64[src]

impl FromCast<Simd<[m8; 64]>> for u8x64[src]

impl FromCast<Simd<[m8; 8]>> for i8x8[src]

impl FromCast<Simd<[m8; 8]>> for u8x8[src]

impl FromCast<Simd<[m8; 8]>> for u64x8[src]

impl FromCast<Simd<[m8; 8]>> for f64x8[src]

impl FromCast<Simd<[m8; 8]>> for m64x8[src]

impl FromCast<Simd<[m8; 8]>> for isizex8[src]

impl FromCast<Simd<[m8; 8]>> for usizex8[src]

impl FromCast<Simd<[m8; 8]>> for msizex8[src]

impl FromCast<Simd<[m8; 8]>> for i16x8[src]

impl FromCast<Simd<[m8; 8]>> for u16x8[src]

impl FromCast<Simd<[m8; 8]>> for m16x8[src]

impl FromCast<Simd<[m8; 8]>> for i32x8[src]

impl FromCast<Simd<[m8; 8]>> for u32x8[src]

impl FromCast<Simd<[m8; 8]>> for f32x8[src]

impl FromCast<Simd<[m8; 8]>> for m32x8[src]

impl FromCast<Simd<[m8; 8]>> for i64x8[src]

impl FromCast<Simd<[msize; 2]>> for i8x2[src]

impl FromCast<Simd<[msize; 2]>> for u8x2[src]

impl FromCast<Simd<[msize; 2]>> for i64x2[src]

impl FromCast<Simd<[msize; 2]>> for u64x2[src]

impl FromCast<Simd<[msize; 2]>> for f64x2[src]

impl FromCast<Simd<[msize; 2]>> for m64x2[src]

impl FromCast<Simd<[msize; 2]>> for isizex2[src]

impl FromCast<Simd<[msize; 2]>> for usizex2[src]

impl FromCast<Simd<[msize; 2]>> for i128x2[src]

impl FromCast<Simd<[msize; 2]>> for u128x2[src]

impl FromCast<Simd<[msize; 2]>> for m128x2[src]

impl FromCast<Simd<[msize; 2]>> for m8x2[src]

impl FromCast<Simd<[msize; 2]>> for i16x2[src]

impl FromCast<Simd<[msize; 2]>> for u16x2[src]

impl FromCast<Simd<[msize; 2]>> for m16x2[src]

impl FromCast<Simd<[msize; 2]>> for i32x2[src]

impl FromCast<Simd<[msize; 2]>> for u32x2[src]

impl FromCast<Simd<[msize; 2]>> for f32x2[src]

impl FromCast<Simd<[msize; 2]>> for m32x2[src]

impl FromCast<Simd<[msize; 4]>> for i8x4[src]

impl FromCast<Simd<[msize; 4]>> for u8x4[src]

impl FromCast<Simd<[msize; 4]>> for i64x4[src]

impl FromCast<Simd<[msize; 4]>> for u64x4[src]

impl FromCast<Simd<[msize; 4]>> for f64x4[src]

impl FromCast<Simd<[msize; 4]>> for m64x4[src]

impl FromCast<Simd<[msize; 4]>> for isizex4[src]

impl FromCast<Simd<[msize; 4]>> for usizex4[src]

impl FromCast<Simd<[msize; 4]>> for i128x4[src]

impl FromCast<Simd<[msize; 4]>> for u128x4[src]

impl FromCast<Simd<[msize; 4]>> for m128x4[src]

impl FromCast<Simd<[msize; 4]>> for m8x4[src]

impl FromCast<Simd<[msize; 4]>> for i16x4[src]

impl FromCast<Simd<[msize; 4]>> for u16x4[src]

impl FromCast<Simd<[msize; 4]>> for m16x4[src]

impl FromCast<Simd<[msize; 4]>> for i32x4[src]

impl FromCast<Simd<[msize; 4]>> for u32x4[src]

impl FromCast<Simd<[msize; 4]>> for f32x4[src]

impl FromCast<Simd<[msize; 4]>> for m32x4[src]

impl FromCast<Simd<[msize; 8]>> for i8x8[src]

impl FromCast<Simd<[msize; 8]>> for u8x8[src]

impl FromCast<Simd<[msize; 8]>> for i64x8[src]

impl FromCast<Simd<[msize; 8]>> for u64x8[src]

impl FromCast<Simd<[msize; 8]>> for f64x8[src]

impl FromCast<Simd<[msize; 8]>> for m64x8[src]

impl FromCast<Simd<[msize; 8]>> for isizex8[src]

impl FromCast<Simd<[msize; 8]>> for usizex8[src]

impl FromCast<Simd<[msize; 8]>> for m8x8[src]

impl FromCast<Simd<[msize; 8]>> for i16x8[src]

impl FromCast<Simd<[msize; 8]>> for u16x8[src]

impl FromCast<Simd<[msize; 8]>> for m16x8[src]

impl FromCast<Simd<[msize; 8]>> for i32x8[src]

impl FromCast<Simd<[msize; 8]>> for u32x8[src]

impl FromCast<Simd<[msize; 8]>> for f32x8[src]

impl FromCast<Simd<[msize; 8]>> for m32x8[src]

impl FromCast<Simd<[u128; 1]>> for i128x1[src]

impl FromCast<Simd<[u128; 1]>> for m128x1[src]

impl FromCast<Simd<[u128; 2]>> for i8x2[src]

impl FromCast<Simd<[u128; 2]>> for u8x2[src]

impl FromCast<Simd<[u128; 2]>> for i64x2[src]

impl FromCast<Simd<[u128; 2]>> for u64x2[src]

impl FromCast<Simd<[u128; 2]>> for f64x2[src]

impl FromCast<Simd<[u128; 2]>> for m64x2[src]

impl FromCast<Simd<[u128; 2]>> for isizex2[src]

impl FromCast<Simd<[u128; 2]>> for usizex2[src]

impl FromCast<Simd<[u128; 2]>> for msizex2[src]

impl FromCast<Simd<[u128; 2]>> for i128x2[src]

impl FromCast<Simd<[u128; 2]>> for m128x2[src]

impl FromCast<Simd<[u128; 2]>> for m8x2[src]

impl FromCast<Simd<[u128; 2]>> for i16x2[src]

impl FromCast<Simd<[u128; 2]>> for u16x2[src]

impl FromCast<Simd<[u128; 2]>> for m16x2[src]

impl FromCast<Simd<[u128; 2]>> for i32x2[src]

impl FromCast<Simd<[u128; 2]>> for u32x2[src]

impl FromCast<Simd<[u128; 2]>> for f32x2[src]

impl FromCast<Simd<[u128; 2]>> for m32x2[src]

impl FromCast<Simd<[u128; 4]>> for i8x4[src]

impl FromCast<Simd<[u128; 4]>> for u8x4[src]

impl FromCast<Simd<[u128; 4]>> for i64x4[src]

impl FromCast<Simd<[u128; 4]>> for u64x4[src]

impl FromCast<Simd<[u128; 4]>> for f64x4[src]

impl FromCast<Simd<[u128; 4]>> for m64x4[src]

impl FromCast<Simd<[u128; 4]>> for isizex4[src]

impl FromCast<Simd<[u128; 4]>> for usizex4[src]

impl FromCast<Simd<[u128; 4]>> for msizex4[src]

impl FromCast<Simd<[u128; 4]>> for i128x4[src]

impl FromCast<Simd<[u128; 4]>> for m128x4[src]

impl FromCast<Simd<[u128; 4]>> for m8x4[src]

impl FromCast<Simd<[u128; 4]>> for i16x4[src]

impl FromCast<Simd<[u128; 4]>> for u16x4[src]

impl FromCast<Simd<[u128; 4]>> for m16x4[src]

impl FromCast<Simd<[u128; 4]>> for i32x4[src]

impl FromCast<Simd<[u128; 4]>> for u32x4[src]

impl FromCast<Simd<[u128; 4]>> for f32x4[src]

impl FromCast<Simd<[u128; 4]>> for m32x4[src]

impl FromCast<Simd<[u16; 16]>> for i8x16[src]

impl FromCast<Simd<[u16; 16]>> for u8x16[src]

impl FromCast<Simd<[u16; 16]>> for m8x16[src]

impl FromCast<Simd<[u16; 16]>> for i16x16[src]

impl FromCast<Simd<[u16; 16]>> for m16x16[src]

impl FromCast<Simd<[u16; 16]>> for i32x16[src]

impl FromCast<Simd<[u16; 16]>> for u32x16[src]

impl FromCast<Simd<[u16; 16]>> for f32x16[src]

impl FromCast<Simd<[u16; 16]>> for m32x16[src]

impl FromCast<Simd<[u16; 2]>> for i8x2[src]

impl FromCast<Simd<[u16; 2]>> for u8x2[src]

impl FromCast<Simd<[u16; 2]>> for u64x2[src]

impl FromCast<Simd<[u16; 2]>> for f64x2[src]

impl FromCast<Simd<[u16; 2]>> for m64x2[src]

impl FromCast<Simd<[u16; 2]>> for isizex2[src]

impl FromCast<Simd<[u16; 2]>> for usizex2[src]

impl FromCast<Simd<[u16; 2]>> for msizex2[src]

impl FromCast<Simd<[u16; 2]>> for i128x2[src]

impl FromCast<Simd<[u16; 2]>> for u128x2[src]

impl FromCast<Simd<[u16; 2]>> for m128x2[src]

impl FromCast<Simd<[u16; 2]>> for m8x2[src]

impl FromCast<Simd<[u16; 2]>> for i16x2[src]

impl FromCast<Simd<[u16; 2]>> for m16x2[src]

impl FromCast<Simd<[u16; 2]>> for i32x2[src]

impl FromCast<Simd<[u16; 2]>> for u32x2[src]

impl FromCast<Simd<[u16; 2]>> for f32x2[src]

impl FromCast<Simd<[u16; 2]>> for m32x2[src]

impl FromCast<Simd<[u16; 2]>> for i64x2[src]

impl FromCast<Simd<[u16; 32]>> for i8x32[src]

impl FromCast<Simd<[u16; 32]>> for u8x32[src]

impl FromCast<Simd<[u16; 32]>> for m8x32[src]

impl FromCast<Simd<[u16; 32]>> for i16x32[src]

impl FromCast<Simd<[u16; 32]>> for m16x32[src]

impl FromCast<Simd<[u16; 4]>> for i8x4[src]

impl FromCast<Simd<[u16; 4]>> for u8x4[src]

impl FromCast<Simd<[u16; 4]>> for u64x4[src]

impl FromCast<Simd<[u16; 4]>> for f64x4[src]

impl FromCast<Simd<[u16; 4]>> for m64x4[src]

impl FromCast<Simd<[u16; 4]>> for isizex4[src]

impl FromCast<Simd<[u16; 4]>> for usizex4[src]

impl FromCast<Simd<[u16; 4]>> for msizex4[src]

impl FromCast<Simd<[u16; 4]>> for i128x4[src]

impl FromCast<Simd<[u16; 4]>> for u128x4[src]

impl FromCast<Simd<[u16; 4]>> for m128x4[src]

impl FromCast<Simd<[u16; 4]>> for m8x4[src]

impl FromCast<Simd<[u16; 4]>> for i16x4[src]

impl FromCast<Simd<[u16; 4]>> for m16x4[src]

impl FromCast<Simd<[u16; 4]>> for i32x4[src]

impl FromCast<Simd<[u16; 4]>> for u32x4[src]

impl FromCast<Simd<[u16; 4]>> for f32x4[src]

impl FromCast<Simd<[u16; 4]>> for m32x4[src]

impl FromCast<Simd<[u16; 4]>> for i64x4[src]

impl FromCast<Simd<[u16; 8]>> for i8x8[src]

impl FromCast<Simd<[u16; 8]>> for u8x8[src]

impl FromCast<Simd<[u16; 8]>> for u64x8[src]

impl FromCast<Simd<[u16; 8]>> for f64x8[src]

impl FromCast<Simd<[u16; 8]>> for m64x8[src]

impl FromCast<Simd<[u16; 8]>> for isizex8[src]

impl FromCast<Simd<[u16; 8]>> for usizex8[src]

impl FromCast<Simd<[u16; 8]>> for msizex8[src]

impl FromCast<Simd<[u16; 8]>> for m8x8[src]

impl FromCast<Simd<[u16; 8]>> for i16x8[src]

impl FromCast<Simd<[u16; 8]>> for m16x8[src]

impl FromCast<Simd<[u16; 8]>> for i32x8[src]

impl FromCast<Simd<[u16; 8]>> for u32x8[src]

impl FromCast<Simd<[u16; 8]>> for f32x8[src]

impl FromCast<Simd<[u16; 8]>> for m32x8[src]

impl FromCast<Simd<[u16; 8]>> for i64x8[src]

impl FromCast<Simd<[u32; 16]>> for i8x16[src]

impl FromCast<Simd<[u32; 16]>> for u8x16[src]

impl FromCast<Simd<[u32; 16]>> for m8x16[src]

impl FromCast<Simd<[u32; 16]>> for i16x16[src]

impl FromCast<Simd<[u32; 16]>> for u16x16[src]

impl FromCast<Simd<[u32; 16]>> for m16x16[src]

impl FromCast<Simd<[u32; 16]>> for i32x16[src]

impl FromCast<Simd<[u32; 16]>> for f32x16[src]

impl FromCast<Simd<[u32; 16]>> for m32x16[src]

impl FromCast<Simd<[u32; 2]>> for i8x2[src]

impl FromCast<Simd<[u32; 2]>> for u8x2[src]

impl FromCast<Simd<[u32; 2]>> for u64x2[src]

impl FromCast<Simd<[u32; 2]>> for f64x2[src]

impl FromCast<Simd<[u32; 2]>> for m64x2[src]

impl FromCast<Simd<[u32; 2]>> for isizex2[src]

impl FromCast<Simd<[u32; 2]>> for usizex2[src]

impl FromCast<Simd<[u32; 2]>> for msizex2[src]

impl FromCast<Simd<[u32; 2]>> for i128x2[src]

impl FromCast<Simd<[u32; 2]>> for u128x2[src]

impl FromCast<Simd<[u32; 2]>> for m128x2[src]

impl FromCast<Simd<[u32; 2]>> for m8x2[src]

impl FromCast<Simd<[u32; 2]>> for i16x2[src]

impl FromCast<Simd<[u32; 2]>> for u16x2[src]

impl FromCast<Simd<[u32; 2]>> for m16x2[src]

impl FromCast<Simd<[u32; 2]>> for i32x2[src]

impl FromCast<Simd<[u32; 2]>> for f32x2[src]

impl FromCast<Simd<[u32; 2]>> for m32x2[src]

impl FromCast<Simd<[u32; 2]>> for i64x2[src]

impl FromCast<Simd<[u32; 4]>> for i8x4[src]

impl FromCast<Simd<[u32; 4]>> for u8x4[src]

impl FromCast<Simd<[u32; 4]>> for u64x4[src]

impl FromCast<Simd<[u32; 4]>> for f64x4[src]

impl FromCast<Simd<[u32; 4]>> for m64x4[src]

impl FromCast<Simd<[u32; 4]>> for isizex4[src]

impl FromCast<Simd<[u32; 4]>> for usizex4[src]

impl FromCast<Simd<[u32; 4]>> for msizex4[src]

impl FromCast<Simd<[u32; 4]>> for i128x4[src]

impl FromCast<Simd<[u32; 4]>> for u128x4[src]

impl FromCast<Simd<[u32; 4]>> for m128x4[src]

impl FromCast<Simd<[u32; 4]>> for m8x4[src]

impl FromCast<Simd<[u32; 4]>> for i16x4[src]

impl FromCast<Simd<[u32; 4]>> for u16x4[src]

impl FromCast<Simd<[u32; 4]>> for m16x4[src]

impl FromCast<Simd<[u32; 4]>> for i32x4[src]

impl FromCast<Simd<[u32; 4]>> for f32x4[src]

impl FromCast<Simd<[u32; 4]>> for m32x4[src]

impl FromCast<Simd<[u32; 4]>> for i64x4[src]

impl FromCast<Simd<[u32; 8]>> for i8x8[src]

impl FromCast<Simd<[u32; 8]>> for u8x8[src]

impl FromCast<Simd<[u32; 8]>> for u64x8[src]

impl FromCast<Simd<[u32; 8]>> for f64x8[src]

impl FromCast<Simd<[u32; 8]>> for m64x8[src]

impl FromCast<Simd<[u32; 8]>> for isizex8[src]

impl FromCast<Simd<[u32; 8]>> for usizex8[src]

impl FromCast<Simd<[u32; 8]>> for msizex8[src]

impl FromCast<Simd<[u32; 8]>> for m8x8[src]

impl FromCast<Simd<[u32; 8]>> for i16x8[src]

impl FromCast<Simd<[u32; 8]>> for u16x8[src]

impl FromCast<Simd<[u32; 8]>> for m16x8[src]

impl FromCast<Simd<[u32; 8]>> for i32x8[src]

impl FromCast<Simd<[u32; 8]>> for f32x8[src]

impl FromCast<Simd<[u32; 8]>> for m32x8[src]

impl FromCast<Simd<[u32; 8]>> for i64x8[src]

impl FromCast<Simd<[u64; 2]>> for i8x2[src]

impl FromCast<Simd<[u64; 2]>> for u8x2[src]

impl FromCast<Simd<[u64; 2]>> for i64x2[src]

impl FromCast<Simd<[u64; 2]>> for f64x2[src]

impl FromCast<Simd<[u64; 2]>> for m64x2[src]

impl FromCast<Simd<[u64; 2]>> for isizex2[src]

impl FromCast<Simd<[u64; 2]>> for usizex2[src]

impl FromCast<Simd<[u64; 2]>> for msizex2[src]

impl FromCast<Simd<[u64; 2]>> for i128x2[src]

impl FromCast<Simd<[u64; 2]>> for u128x2[src]

impl FromCast<Simd<[u64; 2]>> for m128x2[src]

impl FromCast<Simd<[u64; 2]>> for m8x2[src]

impl FromCast<Simd<[u64; 2]>> for i16x2[src]

impl FromCast<Simd<[u64; 2]>> for u16x2[src]

impl FromCast<Simd<[u64; 2]>> for m16x2[src]

impl FromCast<Simd<[u64; 2]>> for i32x2[src]

impl FromCast<Simd<[u64; 2]>> for u32x2[src]

impl FromCast<Simd<[u64; 2]>> for f32x2[src]

impl FromCast<Simd<[u64; 2]>> for m32x2[src]

impl FromCast<Simd<[u64; 4]>> for i8x4[src]

impl FromCast<Simd<[u64; 4]>> for u8x4[src]

impl FromCast<Simd<[u64; 4]>> for i64x4[src]

impl FromCast<Simd<[u64; 4]>> for f64x4[src]

impl FromCast<Simd<[u64; 4]>> for m64x4[src]

impl FromCast<Simd<[u64; 4]>> for isizex4[src]

impl FromCast<Simd<[u64; 4]>> for usizex4[src]

impl FromCast<Simd<[u64; 4]>> for msizex4[src]

impl FromCast<Simd<[u64; 4]>> for i128x4[src]

impl FromCast<Simd<[u64; 4]>> for u128x4[src]

impl FromCast<Simd<[u64; 4]>> for m128x4[src]

impl FromCast<Simd<[u64; 4]>> for m8x4[src]

impl FromCast<Simd<[u64; 4]>> for i16x4[src]

impl FromCast<Simd<[u64; 4]>> for u16x4[src]

impl FromCast<Simd<[u64; 4]>> for m16x4[src]

impl FromCast<Simd<[u64; 4]>> for i32x4[src]

impl FromCast<Simd<[u64; 4]>> for u32x4[src]

impl FromCast<Simd<[u64; 4]>> for f32x4[src]

impl FromCast<Simd<[u64; 4]>> for m32x4[src]

impl FromCast<Simd<[u64; 8]>> for i8x8[src]

impl FromCast<Simd<[u64; 8]>> for u8x8[src]

impl FromCast<Simd<[u64; 8]>> for i64x8[src]

impl FromCast<Simd<[u64; 8]>> for f64x8[src]

impl FromCast<Simd<[u64; 8]>> for m64x8[src]

impl FromCast<Simd<[u64; 8]>> for isizex8[src]

impl FromCast<Simd<[u64; 8]>> for usizex8[src]

impl FromCast<Simd<[u64; 8]>> for msizex8[src]

impl FromCast<Simd<[u64; 8]>> for m8x8[src]

impl FromCast<Simd<[u64; 8]>> for i16x8[src]

impl FromCast<Simd<[u64; 8]>> for u16x8[src]

impl FromCast<Simd<[u64; 8]>> for m16x8[src]

impl FromCast<Simd<[u64; 8]>> for i32x8[src]

impl FromCast<Simd<[u64; 8]>> for u32x8[src]

impl FromCast<Simd<[u64; 8]>> for f32x8[src]

impl FromCast<Simd<[u64; 8]>> for m32x8[src]

impl FromCast<Simd<[u8; 16]>> for i8x16[src]

impl FromCast<Simd<[u8; 16]>> for m8x16[src]

impl FromCast<Simd<[u8; 16]>> for i16x16[src]

impl FromCast<Simd<[u8; 16]>> for u16x16[src]

impl FromCast<Simd<[u8; 16]>> for m16x16[src]

impl FromCast<Simd<[u8; 16]>> for i32x16[src]

impl FromCast<Simd<[u8; 16]>> for u32x16[src]

impl FromCast<Simd<[u8; 16]>> for f32x16[src]

impl FromCast<Simd<[u8; 16]>> for m32x16[src]

impl FromCast<Simd<[u8; 2]>> for i8x2[src]

impl FromCast<Simd<[u8; 2]>> for m8x2[src]

impl FromCast<Simd<[u8; 2]>> for u64x2[src]

impl FromCast<Simd<[u8; 2]>> for f64x2[src]

impl FromCast<Simd<[u8; 2]>> for m64x2[src]

impl FromCast<Simd<[u8; 2]>> for isizex2[src]

impl FromCast<Simd<[u8; 2]>> for usizex2[src]

impl FromCast<Simd<[u8; 2]>> for msizex2[src]

impl FromCast<Simd<[u8; 2]>> for i128x2[src]

impl FromCast<Simd<[u8; 2]>> for u128x2[src]

impl FromCast<Simd<[u8; 2]>> for m128x2[src]

impl FromCast<Simd<[u8; 2]>> for i16x2[src]

impl FromCast<Simd<[u8; 2]>> for u16x2[src]

impl FromCast<Simd<[u8; 2]>> for m16x2[src]

impl FromCast<Simd<[u8; 2]>> for i32x2[src]

impl FromCast<Simd<[u8; 2]>> for u32x2[src]

impl FromCast<Simd<[u8; 2]>> for f32x2[src]

impl FromCast<Simd<[u8; 2]>> for m32x2[src]

impl FromCast<Simd<[u8; 2]>> for i64x2[src]

impl FromCast<Simd<[u8; 32]>> for i8x32[src]

impl FromCast<Simd<[u8; 32]>> for m8x32[src]

impl FromCast<Simd<[u8; 32]>> for i16x32[src]

impl FromCast<Simd<[u8; 32]>> for u16x32[src]

impl FromCast<Simd<[u8; 32]>> for m16x32[src]

impl FromCast<Simd<[u8; 4]>> for i8x4[src]

impl FromCast<Simd<[u8; 4]>> for m8x4[src]

impl FromCast<Simd<[u8; 4]>> for u64x4[src]

impl FromCast<Simd<[u8; 4]>> for f64x4[src]

impl FromCast<Simd<[u8; 4]>> for m64x4[src]

impl FromCast<Simd<[u8; 4]>> for isizex4[src]

impl FromCast<Simd<[u8; 4]>> for usizex4[src]

impl FromCast<Simd<[u8; 4]>> for msizex4[src]

impl FromCast<Simd<[u8; 4]>> for i128x4[src]

impl FromCast<Simd<[u8; 4]>> for u128x4[src]

impl FromCast<Simd<[u8; 4]>> for m128x4[src]

impl FromCast<Simd<[u8; 4]>> for i16x4[src]

impl FromCast<Simd<[u8; 4]>> for u16x4[src]

impl FromCast<Simd<[u8; 4]>> for m16x4[src]

impl FromCast<Simd<[u8; 4]>> for i32x4[src]

impl FromCast<Simd<[u8; 4]>> for u32x4[src]

impl FromCast<Simd<[u8; 4]>> for f32x4[src]

impl FromCast<Simd<[u8; 4]>> for m32x4[src]

impl FromCast<Simd<[u8; 4]>> for i64x4[src]

impl FromCast<Simd<[u8; 64]>> for i8x64[src]

impl FromCast<Simd<[u8; 64]>> for m8x64[src]

impl FromCast<Simd<[u8; 8]>> for i8x8[src]

impl FromCast<Simd<[u8; 8]>> for m8x8[src]

impl FromCast<Simd<[u8; 8]>> for u64x8[src]

impl FromCast<Simd<[u8; 8]>> for f64x8[src]

impl FromCast<Simd<[u8; 8]>> for m64x8[src]

impl FromCast<Simd<[u8; 8]>> for isizex8[src]

impl FromCast<Simd<[u8; 8]>> for usizex8[src]

impl FromCast<Simd<[u8; 8]>> for msizex8[src]

impl FromCast<Simd<[u8; 8]>> for i16x8[src]

impl FromCast<Simd<[u8; 8]>> for u16x8[src]

impl FromCast<Simd<[u8; 8]>> for m16x8[src]

impl FromCast<Simd<[u8; 8]>> for i32x8[src]

impl FromCast<Simd<[u8; 8]>> for u32x8[src]

impl FromCast<Simd<[u8; 8]>> for f32x8[src]

impl FromCast<Simd<[u8; 8]>> for m32x8[src]

impl FromCast<Simd<[u8; 8]>> for i64x8[src]

impl FromCast<Simd<[usize; 2]>> for i8x2[src]

impl FromCast<Simd<[usize; 2]>> for u8x2[src]

impl FromCast<Simd<[usize; 2]>> for i64x2[src]

impl FromCast<Simd<[usize; 2]>> for u64x2[src]

impl FromCast<Simd<[usize; 2]>> for f64x2[src]

impl FromCast<Simd<[usize; 2]>> for m64x2[src]

impl FromCast<Simd<[usize; 2]>> for isizex2[src]

impl FromCast<Simd<[usize; 2]>> for msizex2[src]

impl FromCast<Simd<[usize; 2]>> for i128x2[src]

impl FromCast<Simd<[usize; 2]>> for u128x2[src]

impl FromCast<Simd<[usize; 2]>> for m128x2[src]

impl FromCast<Simd<[usize; 2]>> for m8x2[src]

impl FromCast<Simd<[usize; 2]>> for i16x2[src]

impl FromCast<Simd<[usize; 2]>> for u16x2[src]

impl FromCast<Simd<[usize; 2]>> for m16x2[src]

impl FromCast<Simd<[usize; 2]>> for i32x2[src]

impl FromCast<Simd<[usize; 2]>> for u32x2[src]

impl FromCast<Simd<[usize; 2]>> for f32x2[src]

impl FromCast<Simd<[usize; 2]>> for m32x2[src]

impl FromCast<Simd<[usize; 4]>> for i8x4[src]

impl FromCast<Simd<[usize; 4]>> for u8x4[src]

impl FromCast<Simd<[usize; 4]>> for i64x4[src]

impl FromCast<Simd<[usize; 4]>> for u64x4[src]

impl FromCast<Simd<[usize; 4]>> for f64x4[src]

impl FromCast<Simd<[usize; 4]>> for m64x4[src]

impl FromCast<Simd<[usize; 4]>> for isizex4[src]

impl FromCast<Simd<[usize; 4]>> for msizex4[src]

impl FromCast<Simd<[usize; 4]>> for i128x4[src]

impl FromCast<Simd<[usize; 4]>> for u128x4[src]

impl FromCast<Simd<[usize; 4]>> for m128x4[src]

impl FromCast<Simd<[usize; 4]>> for m8x4[src]

impl FromCast<Simd<[usize; 4]>> for i16x4[src]

impl FromCast<Simd<[usize; 4]>> for u16x4[src]

impl FromCast<Simd<[usize; 4]>> for m16x4[src]

impl FromCast<Simd<[usize; 4]>> for i32x4[src]

impl FromCast<Simd<[usize; 4]>> for u32x4[src]

impl FromCast<Simd<[usize; 4]>> for f32x4[src]

impl FromCast<Simd<[usize; 4]>> for m32x4[src]

impl FromCast<Simd<[usize; 8]>> for i8x8[src]

impl FromCast<Simd<[usize; 8]>> for u8x8[src]

impl FromCast<Simd<[usize; 8]>> for i64x8[src]

impl FromCast<Simd<[usize; 8]>> for u64x8[src]

impl FromCast<Simd<[usize; 8]>> for f64x8[src]

impl FromCast<Simd<[usize; 8]>> for m64x8[src]

impl FromCast<Simd<[usize; 8]>> for isizex8[src]

impl FromCast<Simd<[usize; 8]>> for msizex8[src]

impl FromCast<Simd<[usize; 8]>> for m8x8[src]

impl FromCast<Simd<[usize; 8]>> for i16x8[src]

impl FromCast<Simd<[usize; 8]>> for u16x8[src]

impl FromCast<Simd<[usize; 8]>> for m16x8[src]

impl FromCast<Simd<[usize; 8]>> for i32x8[src]

impl FromCast<Simd<[usize; 8]>> for u32x8[src]

impl FromCast<Simd<[usize; 8]>> for f32x8[src]

impl FromCast<Simd<[usize; 8]>> for m32x8[src]
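
The FromCast impls above differ from FromBits: each lane is converted numerically, with the semantics of an as cast per element, so values rather than raw bits are preserved where representable. The Mul impls that follow additionally accept a bare scalar on the left-hand side, broadcasting it across all lanes. A hedged sketch, again assuming the packed_simd crate (the crate name and the from_cast, new, and splat methods are assumptions, not shown on this page):

    // Sketch only, assuming the packed_simd crate; `from_cast` is the
    // method of the FromCast trait listed above.
    use packed_simd::{f32x4, i32x4, FromCast};

    // `impl FromCast<Simd<[i32; 4]>> for f32x4`: convert each lane,
    // like `lane as f32` (values, not bits, are preserved).
    let ints = i32x4::new(1, 2, 3, 4);
    let floats = f32x4::from_cast(ints);
    assert_eq!(floats, f32x4::new(1.0, 2.0, 3.0, 4.0));

    // `impl Mul<Simd<[f32; 4]>> for f32` (listed below) broadcasts the
    // scalar across all four lanes: Output = f32x4.
    let doubled = 2.0_f32 * floats;
    assert_eq!(doubled, f32x4::new(2.0, 4.0, 6.0, 8.0));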

impl Mul<Simd<[f32; 16]>> for f32x16[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[f32; 16]>> for f32[src]

type Output = f32x16

The resulting type after applying the * operator.

impl Mul<Simd<[f32; 2]>> for f32x2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[f32; 2]>> for f32[src]

type Output = f32x2

The resulting type after applying the * operator.

impl Mul<Simd<[f32; 4]>> for f32x4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[f32; 4]>> for f32[src]

type Output = f32x4

The resulting type after applying the * operator.

impl Mul<Simd<[f32; 8]>> for f32x8[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[f32; 8]>> for f32[src]

type Output = f32x8

The resulting type after applying the * operator.

impl Mul<Simd<[f64; 2]>> for f64x2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[f64; 2]>> for f64[src]

type Output = f64x2

The resulting type after applying the * operator.

impl Mul<Simd<[f64; 4]>> for f64x4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[f64; 4]>> for f64[src]

type Output = f64x4

The resulting type after applying the * operator.

impl Mul<Simd<[f64; 8]>> for f64x8[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[f64; 8]>> for f64[src]

type Output = f64x8

The resulting type after applying the * operator.

impl Mul<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<Simd<[i128; 1]>> for i128[src]

type Output = i128x1

The resulting type after applying the * operator.

impl Mul<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i128; 2]>> for i128[src]

type Output = i128x2

The resulting type after applying the * operator.

+

impl Mul<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i128; 4]>> for i128[src]

type Output = i128x4

The resulting type after applying the * operator.

+

impl Mul<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i16; 16]>> for i16[src]

type Output = i16x16

The resulting type after applying the * operator.

+

impl Mul<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i16; 2]>> for i16[src]

type Output = i16x2

The resulting type after applying the * operator.

+

impl Mul<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i16; 32]>> for i16[src]

type Output = i16x32

The resulting type after applying the * operator.

+

impl Mul<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i16; 4]>> for i16[src]

type Output = i16x4

The resulting type after applying the * operator.

+

impl Mul<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i16; 8]>> for i16[src]

type Output = i16x8

The resulting type after applying the * operator.

+

impl Mul<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i32; 16]>> for i32[src]

type Output = i32x16

The resulting type after applying the * operator.

+

impl Mul<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i32; 2]>> for i32[src]

type Output = i32x2

The resulting type after applying the * operator.

+

impl Mul<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i32; 4]>> for i32[src]

type Output = i32x4

The resulting type after applying the * operator.

+

impl Mul<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i32; 8]>> for i32[src]

type Output = i32x8

The resulting type after applying the * operator.

+

impl Mul<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i64; 2]>> for i64[src]

type Output = i64x2

The resulting type after applying the * operator.

+

impl Mul<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i64; 4]>> for i64[src]

type Output = i64x4

The resulting type after applying the * operator.

+

impl Mul<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i64; 8]>> for i64[src]

type Output = i64x8

The resulting type after applying the * operator.

+

impl Mul<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i8; 16]>> for i8[src]

type Output = i8x16

The resulting type after applying the * operator.

+

impl Mul<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i8; 2]>> for i8[src]

type Output = i8x2

The resulting type after applying the * operator.

+

impl Mul<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i8; 32]>> for i8[src]

type Output = i8x32

The resulting type after applying the * operator.

+

impl Mul<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i8; 4]>> for i8[src]

type Output = i8x4

The resulting type after applying the * operator.

+

impl Mul<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i8; 64]>> for i8[src]

type Output = i8x64

The resulting type after applying the * operator.

+

impl Mul<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[i8; 8]>> for i8[src]

type Output = i8x8

The resulting type after applying the * operator.

+

impl Mul<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[isize; 2]>> for isize[src]

type Output = isizex2

The resulting type after applying the * operator.

+

impl Mul<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[isize; 4]>> for isize[src]

type Output = isizex4

The resulting type after applying the * operator.

+

impl Mul<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[isize; 8]>> for isize[src]

type Output = isizex8

The resulting type after applying the * operator.

+

impl Mul<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u128; 1]>> for u128[src]

type Output = u128x1

The resulting type after applying the * operator.

+

impl Mul<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u128; 2]>> for u128[src]

type Output = u128x2

The resulting type after applying the * operator.

+

impl Mul<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u128; 4]>> for u128[src]

type Output = u128x4

The resulting type after applying the * operator.

+

impl Mul<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u16; 16]>> for u16[src]

type Output = u16x16

The resulting type after applying the * operator.

+

impl Mul<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u16; 2]>> for u16[src]

type Output = u16x2

The resulting type after applying the * operator.

+

impl Mul<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u16; 32]>> for u16[src]

type Output = u16x32

The resulting type after applying the * operator.

+

impl Mul<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u16; 4]>> for u16[src]

type Output = u16x4

The resulting type after applying the * operator.

+

impl Mul<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u16; 8]>> for u16[src]

type Output = u16x8

The resulting type after applying the * operator.

+

impl Mul<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u32; 16]>> for u32[src]

type Output = u32x16

The resulting type after applying the * operator.

+

impl Mul<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u32; 2]>> for u32[src]

type Output = u32x2

The resulting type after applying the * operator.

+

impl Mul<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u32; 4]>> for u32[src]

type Output = u32x4

The resulting type after applying the * operator.

+

impl Mul<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u32; 8]>> for u32[src]

type Output = u32x8

The resulting type after applying the * operator.

+

impl Mul<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u64; 2]>> for u64[src]

type Output = u64x2

The resulting type after applying the * operator.

+

impl Mul<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u64; 4]>> for u64[src]

type Output = u64x4

The resulting type after applying the * operator.

+

impl Mul<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u64; 8]>> for u64[src]

type Output = u64x8

The resulting type after applying the * operator.

+

impl Mul<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u8; 16]>> for u8[src]

type Output = u8x16

The resulting type after applying the * operator.

+

impl Mul<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u8; 2]>> for u8[src]

type Output = u8x2

The resulting type after applying the * operator.

+

impl Mul<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u8; 32]>> for u8[src]

type Output = u8x32

The resulting type after applying the * operator.

+

impl Mul<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u8; 4]>> for u8[src]

type Output = u8x4

The resulting type after applying the * operator.

+

impl Mul<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u8; 64]>> for u8[src]

type Output = u8x64

The resulting type after applying the * operator.

+

impl Mul<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[u8; 8]>> for u8[src]

type Output = u8x8

The resulting type after applying the * operator.

+

impl Mul<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[usize; 2]>> for usize[src]

type Output = usizex2

The resulting type after applying the * operator.

+

impl Mul<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[usize; 4]>> for usize[src]

type Output = usizex4

The resulting type after applying the * operator.

+

impl Mul<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<Simd<[usize; 8]>> for usize[src]

type Output = usizex8

The resulting type after applying the * operator.

+
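
Multiplication is provided in two forms: vector-by-vector and scalar-by-vector. A minimal sketch, assuming packed_simd's lane-wise semantics (the scalar form applies the scalar to every lane):

    use packed_simd::f32x4;

    fn main() {
        let v = f32x4::new(1.0, 2.0, 3.0, 4.0);
        // vector * vector: lane-by-lane product (Output = Self).
        assert_eq!(v * f32x4::splat(2.0), f32x4::new(2.0, 4.0, 6.0, 8.0));
        // scalar * vector: the scalar multiplies every lane (Output = f32x4).
        assert_eq!(2.0_f32 * v, f32x4::new(2.0, 4.0, 6.0, 8.0));
    }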

impl MulAssign<Simd<[f32; 16]>> for f32x16[src]

impl MulAssign<Simd<[f32; 2]>> for f32x2[src]

impl MulAssign<Simd<[f32; 4]>> for f32x4[src]

impl MulAssign<Simd<[f32; 8]>> for f32x8[src]

impl MulAssign<Simd<[f64; 2]>> for f64x2[src]

impl MulAssign<Simd<[f64; 4]>> for f64x4[src]

impl MulAssign<Simd<[f64; 8]>> for f64x8[src]

impl MulAssign<Simd<[i128; 1]>> for i128x1[src]

impl MulAssign<Simd<[i128; 2]>> for i128x2[src]

impl MulAssign<Simd<[i128; 4]>> for i128x4[src]

impl MulAssign<Simd<[i16; 16]>> for i16x16[src]

impl MulAssign<Simd<[i16; 2]>> for i16x2[src]

impl MulAssign<Simd<[i16; 32]>> for i16x32[src]

impl MulAssign<Simd<[i16; 4]>> for i16x4[src]

impl MulAssign<Simd<[i16; 8]>> for i16x8[src]

impl MulAssign<Simd<[i32; 16]>> for i32x16[src]

impl MulAssign<Simd<[i32; 2]>> for i32x2[src]

impl MulAssign<Simd<[i32; 4]>> for i32x4[src]

impl MulAssign<Simd<[i32; 8]>> for i32x8[src]

impl MulAssign<Simd<[i64; 2]>> for i64x2[src]

impl MulAssign<Simd<[i64; 4]>> for i64x4[src]

impl MulAssign<Simd<[i64; 8]>> for i64x8[src]

impl MulAssign<Simd<[i8; 16]>> for i8x16[src]

impl MulAssign<Simd<[i8; 2]>> for i8x2[src]

impl MulAssign<Simd<[i8; 32]>> for i8x32[src]

impl MulAssign<Simd<[i8; 4]>> for i8x4[src]

impl MulAssign<Simd<[i8; 64]>> for i8x64[src]

impl MulAssign<Simd<[i8; 8]>> for i8x8[src]

impl MulAssign<Simd<[isize; 2]>> for isizex2[src]

impl MulAssign<Simd<[isize; 4]>> for isizex4[src]

impl MulAssign<Simd<[isize; 8]>> for isizex8[src]

impl MulAssign<Simd<[u128; 1]>> for u128x1[src]

impl MulAssign<Simd<[u128; 2]>> for u128x2[src]

impl MulAssign<Simd<[u128; 4]>> for u128x4[src]

impl MulAssign<Simd<[u16; 16]>> for u16x16[src]

impl MulAssign<Simd<[u16; 2]>> for u16x2[src]

impl MulAssign<Simd<[u16; 32]>> for u16x32[src]

impl MulAssign<Simd<[u16; 4]>> for u16x4[src]

impl MulAssign<Simd<[u16; 8]>> for u16x8[src]

impl MulAssign<Simd<[u32; 16]>> for u32x16[src]

impl MulAssign<Simd<[u32; 2]>> for u32x2[src]

impl MulAssign<Simd<[u32; 4]>> for u32x4[src]

impl MulAssign<Simd<[u32; 8]>> for u32x8[src]

impl MulAssign<Simd<[u64; 2]>> for u64x2[src]

impl MulAssign<Simd<[u64; 4]>> for u64x4[src]

impl MulAssign<Simd<[u64; 8]>> for u64x8[src]

impl MulAssign<Simd<[u8; 16]>> for u8x16[src]

impl MulAssign<Simd<[u8; 2]>> for u8x2[src]

impl MulAssign<Simd<[u8; 32]>> for u8x32[src]

impl MulAssign<Simd<[u8; 4]>> for u8x4[src]

impl MulAssign<Simd<[u8; 64]>> for u8x64[src]

impl MulAssign<Simd<[u8; 8]>> for u8x8[src]

impl MulAssign<Simd<[usize; 2]>> for usizex2[src]

impl MulAssign<Simd<[usize; 4]>> for usizex4[src]

impl MulAssign<Simd<[usize; 8]>> for usizex8[src]
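
The MulAssign impls are the in-place counterparts of the Mul impls above. A short sketch under the same assumptions:

    use packed_simd::i32x4;

    fn main() {
        let mut v = i32x4::new(1, 2, 3, 4);
        v *= i32x4::splat(10); // lane-wise in-place multiply
        assert_eq!(v, i32x4::new(10, 20, 30, 40));
    }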

impl<T> PartialEq<Simd<[*const T; 2]>> for cptrx2<T>[src]

impl<T> PartialEq<Simd<[*const T; 4]>> for cptrx4<T>[src]

impl<T> PartialEq<Simd<[*const T; 8]>> for cptrx8<T>[src]

impl<T> PartialEq<Simd<[*mut T; 2]>> for mptrx2<T>[src]

impl<T> PartialEq<Simd<[*mut T; 4]>> for mptrx4<T>[src]

impl<T> PartialEq<Simd<[*mut T; 8]>> for mptrx8<T>[src]

impl PartialEq<Simd<[f32; 16]>> for f32x16[src]

impl PartialEq<Simd<[f32; 2]>> for f32x2[src]

impl PartialEq<Simd<[f32; 4]>> for f32x4[src]

impl PartialEq<Simd<[f32; 8]>> for f32x8[src]

impl PartialEq<Simd<[f64; 2]>> for f64x2[src]

impl PartialEq<Simd<[f64; 4]>> for f64x4[src]

impl PartialEq<Simd<[f64; 8]>> for f64x8[src]

impl PartialEq<Simd<[i128; 1]>> for i128x1[src]

impl PartialEq<Simd<[i128; 2]>> for i128x2[src]

impl PartialEq<Simd<[i128; 4]>> for i128x4[src]

impl PartialEq<Simd<[i16; 16]>> for i16x16[src]

impl PartialEq<Simd<[i16; 2]>> for i16x2[src]

impl PartialEq<Simd<[i16; 32]>> for i16x32[src]

impl PartialEq<Simd<[i16; 4]>> for i16x4[src]

impl PartialEq<Simd<[i16; 8]>> for i16x8[src]

impl PartialEq<Simd<[i32; 16]>> for i32x16[src]

impl PartialEq<Simd<[i32; 2]>> for i32x2[src]

impl PartialEq<Simd<[i32; 4]>> for i32x4[src]

impl PartialEq<Simd<[i32; 8]>> for i32x8[src]

impl PartialEq<Simd<[i64; 2]>> for i64x2[src]

impl PartialEq<Simd<[i64; 4]>> for i64x4[src]

impl PartialEq<Simd<[i64; 8]>> for i64x8[src]

impl PartialEq<Simd<[i8; 16]>> for i8x16[src]

impl PartialEq<Simd<[i8; 2]>> for i8x2[src]

impl PartialEq<Simd<[i8; 32]>> for i8x32[src]

impl PartialEq<Simd<[i8; 4]>> for i8x4[src]

impl PartialEq<Simd<[i8; 64]>> for i8x64[src]

impl PartialEq<Simd<[i8; 8]>> for i8x8[src]

impl PartialEq<Simd<[isize; 2]>> for isizex2[src]

impl PartialEq<Simd<[isize; 4]>> for isizex4[src]

impl PartialEq<Simd<[isize; 8]>> for isizex8[src]

impl PartialEq<Simd<[m128; 1]>> for m128x1[src]

impl PartialEq<Simd<[m128; 2]>> for m128x2[src]

impl PartialEq<Simd<[m128; 4]>> for m128x4[src]

impl PartialEq<Simd<[m16; 16]>> for m16x16[src]

impl PartialEq<Simd<[m16; 2]>> for m16x2[src]

impl PartialEq<Simd<[m16; 32]>> for m16x32[src]

impl PartialEq<Simd<[m16; 4]>> for m16x4[src]

impl PartialEq<Simd<[m16; 8]>> for m16x8[src]

impl PartialEq<Simd<[m32; 16]>> for m32x16[src]

impl PartialEq<Simd<[m32; 2]>> for m32x2[src]

impl PartialEq<Simd<[m32; 4]>> for m32x4[src]

impl PartialEq<Simd<[m32; 8]>> for m32x8[src]

impl PartialEq<Simd<[m64; 2]>> for m64x2[src]

impl PartialEq<Simd<[m64; 4]>> for m64x4[src]

impl PartialEq<Simd<[m64; 8]>> for m64x8[src]

impl PartialEq<Simd<[m8; 16]>> for m8x16[src]

impl PartialEq<Simd<[m8; 2]>> for m8x2[src]

impl PartialEq<Simd<[m8; 32]>> for m8x32[src]

impl PartialEq<Simd<[m8; 4]>> for m8x4[src]

impl PartialEq<Simd<[m8; 64]>> for m8x64[src]

impl PartialEq<Simd<[m8; 8]>> for m8x8[src]

impl PartialEq<Simd<[msize; 2]>> for msizex2[src]

impl PartialEq<Simd<[msize; 4]>> for msizex4[src]

impl PartialEq<Simd<[msize; 8]>> for msizex8[src]

impl PartialEq<Simd<[u128; 1]>> for u128x1[src]

impl PartialEq<Simd<[u128; 2]>> for u128x2[src]

impl PartialEq<Simd<[u128; 4]>> for u128x4[src]

impl PartialEq<Simd<[u16; 16]>> for u16x16[src]

impl PartialEq<Simd<[u16; 2]>> for u16x2[src]

impl PartialEq<Simd<[u16; 32]>> for u16x32[src]

impl PartialEq<Simd<[u16; 4]>> for u16x4[src]

impl PartialEq<Simd<[u16; 8]>> for u16x8[src]

impl PartialEq<Simd<[u32; 16]>> for u32x16[src]

impl PartialEq<Simd<[u32; 2]>> for u32x2[src]

impl PartialEq<Simd<[u32; 4]>> for u32x4[src]

impl PartialEq<Simd<[u32; 8]>> for u32x8[src]

impl PartialEq<Simd<[u64; 2]>> for u64x2[src]

impl PartialEq<Simd<[u64; 4]>> for u64x4[src]

impl PartialEq<Simd<[u64; 8]>> for u64x8[src]

impl PartialEq<Simd<[u8; 16]>> for u8x16[src]

impl PartialEq<Simd<[u8; 2]>> for u8x2[src]

impl PartialEq<Simd<[u8; 32]>> for u8x32[src]

impl PartialEq<Simd<[u8; 4]>> for u8x4[src]

impl PartialEq<Simd<[u8; 64]>> for u8x64[src]

impl PartialEq<Simd<[u8; 8]>> for u8x8[src]

impl PartialEq<Simd<[usize; 2]>> for usizex2[src]

impl PartialEq<Simd<[usize; 4]>> for usizex4[src]

impl PartialEq<Simd<[usize; 8]>> for usizex8[src]
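
Note that PartialEq compares whole vectors: == and != return a single bool that is true only when every lane matches. (Lane-wise comparisons that return the m-prefixed mask vectors are a separate API, not these impls.) A minimal sketch:

    use packed_simd::u8x4;

    fn main() {
        let a = u8x4::new(1, 2, 3, 4);
        // All four lanes are equal, so the whole-vector comparison is true.
        assert!(a == u8x4::new(1, 2, 3, 4));
        assert!(a != u8x4::splat(0));
    }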

impl<'a> Product<&'a Simd<[f32; 16]>> for f32x16[src]

impl<'a> Product<&'a Simd<[f32; 2]>> for f32x2[src]

impl<'a> Product<&'a Simd<[f32; 4]>> for f32x4[src]

impl<'a> Product<&'a Simd<[f32; 8]>> for f32x8[src]

impl<'a> Product<&'a Simd<[f64; 2]>> for f64x2[src]

impl<'a> Product<&'a Simd<[f64; 4]>> for f64x4[src]

impl<'a> Product<&'a Simd<[f64; 8]>> for f64x8[src]

impl<'a> Product<&'a Simd<[i128; 1]>> for i128x1[src]

impl<'a> Product<&'a Simd<[i128; 2]>> for i128x2[src]

impl<'a> Product<&'a Simd<[i128; 4]>> for i128x4[src]

impl<'a> Product<&'a Simd<[i16; 16]>> for i16x16[src]

impl<'a> Product<&'a Simd<[i16; 2]>> for i16x2[src]

impl<'a> Product<&'a Simd<[i16; 32]>> for i16x32[src]

impl<'a> Product<&'a Simd<[i16; 4]>> for i16x4[src]

impl<'a> Product<&'a Simd<[i16; 8]>> for i16x8[src]

impl<'a> Product<&'a Simd<[i32; 16]>> for i32x16[src]

impl<'a> Product<&'a Simd<[i32; 2]>> for i32x2[src]

impl<'a> Product<&'a Simd<[i32; 4]>> for i32x4[src]

impl<'a> Product<&'a Simd<[i32; 8]>> for i32x8[src]

impl<'a> Product<&'a Simd<[i64; 2]>> for i64x2[src]

impl<'a> Product<&'a Simd<[i64; 4]>> for i64x4[src]

impl<'a> Product<&'a Simd<[i64; 8]>> for i64x8[src]

impl<'a> Product<&'a Simd<[i8; 16]>> for i8x16[src]

impl<'a> Product<&'a Simd<[i8; 2]>> for i8x2[src]

impl<'a> Product<&'a Simd<[i8; 32]>> for i8x32[src]

impl<'a> Product<&'a Simd<[i8; 4]>> for i8x4[src]

impl<'a> Product<&'a Simd<[i8; 64]>> for i8x64[src]

impl<'a> Product<&'a Simd<[i8; 8]>> for i8x8[src]

impl<'a> Product<&'a Simd<[isize; 2]>> for isizex2[src]

impl<'a> Product<&'a Simd<[isize; 4]>> for isizex4[src]

impl<'a> Product<&'a Simd<[isize; 8]>> for isizex8[src]

impl<'a> Product<&'a Simd<[u128; 1]>> for u128x1[src]

impl<'a> Product<&'a Simd<[u128; 2]>> for u128x2[src]

impl<'a> Product<&'a Simd<[u128; 4]>> for u128x4[src]

impl<'a> Product<&'a Simd<[u16; 16]>> for u16x16[src]

impl<'a> Product<&'a Simd<[u16; 2]>> for u16x2[src]

impl<'a> Product<&'a Simd<[u16; 32]>> for u16x32[src]

impl<'a> Product<&'a Simd<[u16; 4]>> for u16x4[src]

impl<'a> Product<&'a Simd<[u16; 8]>> for u16x8[src]

impl<'a> Product<&'a Simd<[u32; 16]>> for u32x16[src]

impl<'a> Product<&'a Simd<[u32; 2]>> for u32x2[src]

impl<'a> Product<&'a Simd<[u32; 4]>> for u32x4[src]

impl<'a> Product<&'a Simd<[u32; 8]>> for u32x8[src]

impl<'a> Product<&'a Simd<[u64; 2]>> for u64x2[src]

impl<'a> Product<&'a Simd<[u64; 4]>> for u64x4[src]

impl<'a> Product<&'a Simd<[u64; 8]>> for u64x8[src]

impl<'a> Product<&'a Simd<[u8; 16]>> for u8x16[src]

impl<'a> Product<&'a Simd<[u8; 2]>> for u8x2[src]

impl<'a> Product<&'a Simd<[u8; 32]>> for u8x32[src]

impl<'a> Product<&'a Simd<[u8; 4]>> for u8x4[src]

impl<'a> Product<&'a Simd<[u8; 64]>> for u8x64[src]

impl<'a> Product<&'a Simd<[u8; 8]>> for u8x8[src]

impl<'a> Product<&'a Simd<[usize; 2]>> for usizex2[src]

impl<'a> Product<&'a Simd<[usize; 4]>> for usizex4[src]

impl<'a> Product<&'a Simd<[usize; 8]>> for usizex8[src]

impl Product<Simd<[f32; 16]>> for f32x16[src]

impl Product<Simd<[f32; 2]>> for f32x2[src]

impl Product<Simd<[f32; 4]>> for f32x4[src]

impl Product<Simd<[f32; 8]>> for f32x8[src]

impl Product<Simd<[f64; 2]>> for f64x2[src]

impl Product<Simd<[f64; 4]>> for f64x4[src]

impl Product<Simd<[f64; 8]>> for f64x8[src]

impl Product<Simd<[i128; 1]>> for i128x1[src]

impl Product<Simd<[i128; 2]>> for i128x2[src]

impl Product<Simd<[i128; 4]>> for i128x4[src]

impl Product<Simd<[i16; 16]>> for i16x16[src]

impl Product<Simd<[i16; 2]>> for i16x2[src]

impl Product<Simd<[i16; 32]>> for i16x32[src]

impl Product<Simd<[i16; 4]>> for i16x4[src]

impl Product<Simd<[i16; 8]>> for i16x8[src]

impl Product<Simd<[i32; 16]>> for i32x16[src]

impl Product<Simd<[i32; 2]>> for i32x2[src]

impl Product<Simd<[i32; 4]>> for i32x4[src]

impl Product<Simd<[i32; 8]>> for i32x8[src]

impl Product<Simd<[i64; 2]>> for i64x2[src]

impl Product<Simd<[i64; 4]>> for i64x4[src]

impl Product<Simd<[i64; 8]>> for i64x8[src]

impl Product<Simd<[i8; 16]>> for i8x16[src]

impl Product<Simd<[i8; 2]>> for i8x2[src]

impl Product<Simd<[i8; 32]>> for i8x32[src]

impl Product<Simd<[i8; 4]>> for i8x4[src]

impl Product<Simd<[i8; 64]>> for i8x64[src]

impl Product<Simd<[i8; 8]>> for i8x8[src]

impl Product<Simd<[isize; 2]>> for isizex2[src]

impl Product<Simd<[isize; 4]>> for isizex4[src]

impl Product<Simd<[isize; 8]>> for isizex8[src]

impl Product<Simd<[u128; 1]>> for u128x1[src]

impl Product<Simd<[u128; 2]>> for u128x2[src]

impl Product<Simd<[u128; 4]>> for u128x4[src]

impl Product<Simd<[u16; 16]>> for u16x16[src]

impl Product<Simd<[u16; 2]>> for u16x2[src]

impl Product<Simd<[u16; 32]>> for u16x32[src]

impl Product<Simd<[u16; 4]>> for u16x4[src]

impl Product<Simd<[u16; 8]>> for u16x8[src]

impl Product<Simd<[u32; 16]>> for u32x16[src]

impl Product<Simd<[u32; 2]>> for u32x2[src]

impl Product<Simd<[u32; 4]>> for u32x4[src]

impl Product<Simd<[u32; 8]>> for u32x8[src]

impl Product<Simd<[u64; 2]>> for u64x2[src]

impl Product<Simd<[u64; 4]>> for u64x4[src]

impl Product<Simd<[u64; 8]>> for u64x8[src]

impl Product<Simd<[u8; 16]>> for u8x16[src]

impl Product<Simd<[u8; 2]>> for u8x2[src]

impl Product<Simd<[u8; 32]>> for u8x32[src]

impl Product<Simd<[u8; 4]>> for u8x4[src]

impl Product<Simd<[u8; 64]>> for u8x64[src]

impl Product<Simd<[u8; 8]>> for u8x8[src]

impl Product<Simd<[usize; 2]>> for usizex2[src]

impl Product<Simd<[usize; 4]>> for usizex4[src]

impl Product<Simd<[usize; 8]>> for usizex8[src]
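
Product is implemented both for iterators of owned vectors and, via the &'a impls above, for iterators of references; the reduction itself is lane-wise under packed_simd's semantics. A sketch:

    use packed_simd::f32x4;

    fn main() {
        let vs = [f32x4::splat(2.0), f32x4::splat(3.0)];
        // By-reference and by-value products both resolve through the impls above.
        let by_ref: f32x4 = vs.iter().product();
        let by_val: f32x4 = vs.iter().copied().product();
        assert_eq!(by_ref, f32x4::splat(6.0));
        assert_eq!(by_val, by_ref);
    }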

As with Mul, every vector-by-vector impl below has type Output = Self, and every scalar impl (e.g. impl Rem<Simd<[f32; 16]>> for f32) has the matching vector type as its Output (here f32x16); in all cases, Output is the resulting type after applying the % operator.

impl Rem<Simd<[f32; 16]>> for f32x16[src]
impl Rem<Simd<[f32; 16]>> for f32[src]
impl Rem<Simd<[f32; 2]>> for f32x2[src]
impl Rem<Simd<[f32; 2]>> for f32[src]
impl Rem<Simd<[f32; 4]>> for f32x4[src]
impl Rem<Simd<[f32; 4]>> for f32[src]
impl Rem<Simd<[f32; 8]>> for f32x8[src]
impl Rem<Simd<[f32; 8]>> for f32[src]

impl Rem<Simd<[f64; 2]>> for f64x2[src]
impl Rem<Simd<[f64; 2]>> for f64[src]
impl Rem<Simd<[f64; 4]>> for f64x4[src]
impl Rem<Simd<[f64; 4]>> for f64[src]
impl Rem<Simd<[f64; 8]>> for f64x8[src]
impl Rem<Simd<[f64; 8]>> for f64[src]

impl Rem<Simd<[i128; 1]>> for i128x1[src]
impl Rem<Simd<[i128; 1]>> for i128[src]
impl Rem<Simd<[i128; 2]>> for i128x2[src]
impl Rem<Simd<[i128; 2]>> for i128[src]
impl Rem<Simd<[i128; 4]>> for i128x4[src]
impl Rem<Simd<[i128; 4]>> for i128[src]

impl Rem<Simd<[i16; 16]>> for i16x16[src]
impl Rem<Simd<[i16; 16]>> for i16[src]
impl Rem<Simd<[i16; 2]>> for i16x2[src]
impl Rem<Simd<[i16; 2]>> for i16[src]
impl Rem<Simd<[i16; 32]>> for i16x32[src]
impl Rem<Simd<[i16; 32]>> for i16[src]
impl Rem<Simd<[i16; 4]>> for i16x4[src]
impl Rem<Simd<[i16; 4]>> for i16[src]
impl Rem<Simd<[i16; 8]>> for i16x8[src]
impl Rem<Simd<[i16; 8]>> for i16[src]

impl Rem<Simd<[i32; 16]>> for i32x16[src]
impl Rem<Simd<[i32; 16]>> for i32[src]
impl Rem<Simd<[i32; 2]>> for i32x2[src]
impl Rem<Simd<[i32; 2]>> for i32[src]
impl Rem<Simd<[i32; 4]>> for i32x4[src]
impl Rem<Simd<[i32; 4]>> for i32[src]
impl Rem<Simd<[i32; 8]>> for i32x8[src]
impl Rem<Simd<[i32; 8]>> for i32[src]

impl Rem<Simd<[i64; 2]>> for i64x2[src]
impl Rem<Simd<[i64; 2]>> for i64[src]
impl Rem<Simd<[i64; 4]>> for i64x4[src]
impl Rem<Simd<[i64; 4]>> for i64[src]
impl Rem<Simd<[i64; 8]>> for i64x8[src]
impl Rem<Simd<[i64; 8]>> for i64[src]

impl Rem<Simd<[i8; 16]>> for i8x16[src]
impl Rem<Simd<[i8; 16]>> for i8[src]
impl Rem<Simd<[i8; 2]>> for i8x2[src]
impl Rem<Simd<[i8; 2]>> for i8[src]
impl Rem<Simd<[i8; 32]>> for i8x32[src]
impl Rem<Simd<[i8; 32]>> for i8[src]
impl Rem<Simd<[i8; 4]>> for i8x4[src]
impl Rem<Simd<[i8; 4]>> for i8[src]
impl Rem<Simd<[i8; 64]>> for i8x64[src]
impl Rem<Simd<[i8; 64]>> for i8[src]
impl Rem<Simd<[i8; 8]>> for i8x8[src]
impl Rem<Simd<[i8; 8]>> for i8[src]

impl Rem<Simd<[isize; 2]>> for isizex2[src]
impl Rem<Simd<[isize; 2]>> for isize[src]
impl Rem<Simd<[isize; 4]>> for isizex4[src]
impl Rem<Simd<[isize; 4]>> for isize[src]
impl Rem<Simd<[isize; 8]>> for isizex8[src]
impl Rem<Simd<[isize; 8]>> for isize[src]

impl Rem<Simd<[u128; 1]>> for u128x1[src]
impl Rem<Simd<[u128; 1]>> for u128[src]
impl Rem<Simd<[u128; 2]>> for u128x2[src]
impl Rem<Simd<[u128; 2]>> for u128[src]
impl Rem<Simd<[u128; 4]>> for u128x4[src]
impl Rem<Simd<[u128; 4]>> for u128[src]

impl Rem<Simd<[u16; 16]>> for u16x16[src]
impl Rem<Simd<[u16; 16]>> for u16[src]
impl Rem<Simd<[u16; 2]>> for u16x2[src]
impl Rem<Simd<[u16; 2]>> for u16[src]
impl Rem<Simd<[u16; 32]>> for u16x32[src]
impl Rem<Simd<[u16; 32]>> for u16[src]
impl Rem<Simd<[u16; 4]>> for u16x4[src]
impl Rem<Simd<[u16; 4]>> for u16[src]
impl Rem<Simd<[u16; 8]>> for u16x8[src]
impl Rem<Simd<[u16; 8]>> for u16[src]

impl Rem<Simd<[u32; 16]>> for u32x16[src]
impl Rem<Simd<[u32; 16]>> for u32[src]
impl Rem<Simd<[u32; 2]>> for u32x2[src]
impl Rem<Simd<[u32; 2]>> for u32[src]
impl Rem<Simd<[u32; 4]>> for u32x4[src]
impl Rem<Simd<[u32; 4]>> for u32[src]
impl Rem<Simd<[u32; 8]>> for u32x8[src]
impl Rem<Simd<[u32; 8]>> for u32[src]

impl Rem<Simd<[u64; 2]>> for u64x2[src]
impl Rem<Simd<[u64; 2]>> for u64[src]
impl Rem<Simd<[u64; 4]>> for u64x4[src]
impl Rem<Simd<[u64; 4]>> for u64[src]
impl Rem<Simd<[u64; 8]>> for u64x8[src]
impl Rem<Simd<[u64; 8]>> for u64[src]

impl Rem<Simd<[u8; 16]>> for u8x16[src]
impl Rem<Simd<[u8; 16]>> for u8[src]
impl Rem<Simd<[u8; 2]>> for u8x2[src]
impl Rem<Simd<[u8; 2]>> for u8[src]
impl Rem<Simd<[u8; 32]>> for u8x32[src]
impl Rem<Simd<[u8; 32]>> for u8[src]
impl Rem<Simd<[u8; 4]>> for u8x4[src]
impl Rem<Simd<[u8; 4]>> for u8[src]
impl Rem<Simd<[u8; 64]>> for u8x64[src]
impl Rem<Simd<[u8; 64]>> for u8[src]
impl Rem<Simd<[u8; 8]>> for u8x8[src]
impl Rem<Simd<[u8; 8]>> for u8[src]

impl Rem<Simd<[usize; 2]>> for usizex2[src]
impl Rem<Simd<[usize; 2]>> for usize[src]
impl Rem<Simd<[usize; 4]>> for usizex4[src]
impl Rem<Simd<[usize; 4]>> for usize[src]
impl Rem<Simd<[usize; 8]>> for usizex8[src]
impl Rem<Simd<[usize; 8]>> for usize[src]
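
Like Mul, the remainder operator comes in vector-by-vector and scalar-by-vector forms, both lane-wise under the packed_simd assumption:

    use packed_simd::i32x4;

    fn main() {
        let v = i32x4::new(7, 8, 9, 10);
        // vector % vector: remainder in each lane.
        assert_eq!(v % i32x4::splat(3), i32x4::new(1, 2, 0, 1));
        // scalar % vector (Output = i32x4).
        assert_eq!(7 % i32x4::new(2, 3, 4, 5), i32x4::new(1, 1, 3, 2));
    }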

impl RemAssign<Simd<[f32; 16]>> for f32x16[src]

impl RemAssign<Simd<[f32; 2]>> for f32x2[src]

impl RemAssign<Simd<[f32; 4]>> for f32x4[src]

impl RemAssign<Simd<[f32; 8]>> for f32x8[src]

impl RemAssign<Simd<[f64; 2]>> for f64x2[src]

impl RemAssign<Simd<[f64; 4]>> for f64x4[src]

impl RemAssign<Simd<[f64; 8]>> for f64x8[src]

impl RemAssign<Simd<[i128; 1]>> for i128x1[src]

impl RemAssign<Simd<[i128; 2]>> for i128x2[src]

impl RemAssign<Simd<[i128; 4]>> for i128x4[src]

impl RemAssign<Simd<[i16; 16]>> for i16x16[src]

impl RemAssign<Simd<[i16; 2]>> for i16x2[src]

impl RemAssign<Simd<[i16; 32]>> for i16x32[src]

impl RemAssign<Simd<[i16; 4]>> for i16x4[src]

impl RemAssign<Simd<[i16; 8]>> for i16x8[src]

impl RemAssign<Simd<[i32; 16]>> for i32x16[src]

impl RemAssign<Simd<[i32; 2]>> for i32x2[src]

impl RemAssign<Simd<[i32; 4]>> for i32x4[src]

impl RemAssign<Simd<[i32; 8]>> for i32x8[src]

impl RemAssign<Simd<[i64; 2]>> for i64x2[src]

impl RemAssign<Simd<[i64; 4]>> for i64x4[src]

impl RemAssign<Simd<[i64; 8]>> for i64x8[src]

impl RemAssign<Simd<[i8; 16]>> for i8x16[src]

impl RemAssign<Simd<[i8; 2]>> for i8x2[src]

impl RemAssign<Simd<[i8; 32]>> for i8x32[src]

impl RemAssign<Simd<[i8; 4]>> for i8x4[src]

impl RemAssign<Simd<[i8; 64]>> for i8x64[src]

impl RemAssign<Simd<[i8; 8]>> for i8x8[src]

impl RemAssign<Simd<[isize; 2]>> for isizex2[src]

impl RemAssign<Simd<[isize; 4]>> for isizex4[src]

impl RemAssign<Simd<[isize; 8]>> for isizex8[src]

impl RemAssign<Simd<[u128; 1]>> for u128x1[src]

impl RemAssign<Simd<[u128; 2]>> for u128x2[src]

impl RemAssign<Simd<[u128; 4]>> for u128x4[src]

impl RemAssign<Simd<[u16; 16]>> for u16x16[src]

impl RemAssign<Simd<[u16; 2]>> for u16x2[src]

impl RemAssign<Simd<[u16; 32]>> for u16x32[src]

impl RemAssign<Simd<[u16; 4]>> for u16x4[src]

impl RemAssign<Simd<[u16; 8]>> for u16x8[src]

impl RemAssign<Simd<[u32; 16]>> for u32x16[src]

impl RemAssign<Simd<[u32; 2]>> for u32x2[src]

impl RemAssign<Simd<[u32; 4]>> for u32x4[src]

impl RemAssign<Simd<[u32; 8]>> for u32x8[src]

impl RemAssign<Simd<[u64; 2]>> for u64x2[src]

impl RemAssign<Simd<[u64; 4]>> for u64x4[src]

impl RemAssign<Simd<[u64; 8]>> for u64x8[src]

impl RemAssign<Simd<[u8; 16]>> for u8x16[src]

impl RemAssign<Simd<[u8; 2]>> for u8x2[src]

impl RemAssign<Simd<[u8; 32]>> for u8x32[src]

impl RemAssign<Simd<[u8; 4]>> for u8x4[src]

impl RemAssign<Simd<[u8; 64]>> for u8x64[src]

impl RemAssign<Simd<[u8; 8]>> for u8x8[src]

impl RemAssign<Simd<[usize; 2]>> for usizex2[src]

impl RemAssign<Simd<[usize; 4]>> for usizex4[src]

impl RemAssign<Simd<[usize; 8]>> for usizex8[src]
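
RemAssign is the in-place form of Rem. A short sketch:

    use packed_simd::u32x4;

    fn main() {
        let mut v = u32x4::new(10, 11, 12, 13);
        v %= u32x4::splat(4); // lane-wise in-place remainder
        assert_eq!(v, u32x4::new(2, 3, 0, 1));
    }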

Every impl below has type Output = Self, the resulting type after applying the << operator.

impl Shl<Simd<[i128; 1]>> for i128x1[src]
impl Shl<Simd<[i128; 2]>> for i128x2[src]
impl Shl<Simd<[i128; 4]>> for i128x4[src]

impl Shl<Simd<[i16; 16]>> for i16x16[src]
impl Shl<Simd<[i16; 2]>> for i16x2[src]
impl Shl<Simd<[i16; 32]>> for i16x32[src]
impl Shl<Simd<[i16; 4]>> for i16x4[src]
impl Shl<Simd<[i16; 8]>> for i16x8[src]

impl Shl<Simd<[i32; 16]>> for i32x16[src]
impl Shl<Simd<[i32; 2]>> for i32x2[src]
impl Shl<Simd<[i32; 4]>> for i32x4[src]
impl Shl<Simd<[i32; 8]>> for i32x8[src]

impl Shl<Simd<[i64; 2]>> for i64x2[src]
impl Shl<Simd<[i64; 4]>> for i64x4[src]
impl Shl<Simd<[i64; 8]>> for i64x8[src]

impl Shl<Simd<[i8; 16]>> for i8x16[src]
impl Shl<Simd<[i8; 2]>> for i8x2[src]
impl Shl<Simd<[i8; 32]>> for i8x32[src]
impl Shl<Simd<[i8; 4]>> for i8x4[src]
impl Shl<Simd<[i8; 64]>> for i8x64[src]
impl Shl<Simd<[i8; 8]>> for i8x8[src]

impl Shl<Simd<[isize; 2]>> for isizex2[src]
impl Shl<Simd<[isize; 4]>> for isizex4[src]
impl Shl<Simd<[isize; 8]>> for isizex8[src]

impl Shl<Simd<[u128; 1]>> for u128x1[src]
impl Shl<Simd<[u128; 2]>> for u128x2[src]
impl Shl<Simd<[u128; 4]>> for u128x4[src]

impl Shl<Simd<[u16; 16]>> for u16x16[src]
impl Shl<Simd<[u16; 2]>> for u16x2[src]
impl Shl<Simd<[u16; 32]>> for u16x32[src]
impl Shl<Simd<[u16; 4]>> for u16x4[src]
impl Shl<Simd<[u16; 8]>> for u16x8[src]

impl Shl<Simd<[u32; 16]>> for u32x16[src]
impl Shl<Simd<[u32; 2]>> for u32x2[src]
impl Shl<Simd<[u32; 4]>> for u32x4[src]
impl Shl<Simd<[u32; 8]>> for u32x8[src]

impl Shl<Simd<[u64; 2]>> for u64x2[src]
impl Shl<Simd<[u64; 4]>> for u64x4[src]
impl Shl<Simd<[u64; 8]>> for u64x8[src]

impl Shl<Simd<[u8; 16]>> for u8x16[src]
impl Shl<Simd<[u8; 2]>> for u8x2[src]
impl Shl<Simd<[u8; 32]>> for u8x32[src]
impl Shl<Simd<[u8; 4]>> for u8x4[src]
impl Shl<Simd<[u8; 64]>> for u8x64[src]
impl Shl<Simd<[u8; 8]>> for u8x8[src]

impl Shl<Simd<[usize; 2]>> for usizex2[src]
impl Shl<Simd<[usize; 4]>> for usizex4[src]
impl Shl<Simd<[usize; 8]>> for usizex8[src]
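
Unlike the scalar shift operators in core, these impls take a vector of shift amounts: each lane is shifted by the corresponding lane of the right-hand side. A sketch under the packed_simd assumption:

    use packed_simd::u32x4;

    fn main() {
        let ones = u32x4::splat(1);
        // Lane i is shifted left by rhs lane i: 1<<0, 1<<1, 1<<2, 1<<3.
        assert_eq!(ones << u32x4::new(0, 1, 2, 3), u32x4::new(1, 2, 4, 8));
    }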

impl ShlAssign<Simd<[i128; 1]>> for i128x1[src]

impl ShlAssign<Simd<[i128; 2]>> for i128x2[src]

impl ShlAssign<Simd<[i128; 4]>> for i128x4[src]

impl ShlAssign<Simd<[i16; 16]>> for i16x16[src]

impl ShlAssign<Simd<[i16; 2]>> for i16x2[src]

impl ShlAssign<Simd<[i16; 32]>> for i16x32[src]

impl ShlAssign<Simd<[i16; 4]>> for i16x4[src]

impl ShlAssign<Simd<[i16; 8]>> for i16x8[src]

impl ShlAssign<Simd<[i32; 16]>> for i32x16[src]

impl ShlAssign<Simd<[i32; 2]>> for i32x2[src]

impl ShlAssign<Simd<[i32; 4]>> for i32x4[src]

impl ShlAssign<Simd<[i32; 8]>> for i32x8[src]

impl ShlAssign<Simd<[i64; 2]>> for i64x2[src]

impl ShlAssign<Simd<[i64; 4]>> for i64x4[src]

impl ShlAssign<Simd<[i64; 8]>> for i64x8[src]

impl ShlAssign<Simd<[i8; 16]>> for i8x16[src]

impl ShlAssign<Simd<[i8; 2]>> for i8x2[src]

impl ShlAssign<Simd<[i8; 32]>> for i8x32[src]

impl ShlAssign<Simd<[i8; 4]>> for i8x4[src]

impl ShlAssign<Simd<[i8; 64]>> for i8x64[src]

impl ShlAssign<Simd<[i8; 8]>> for i8x8[src]

impl ShlAssign<Simd<[isize; 2]>> for isizex2[src]

impl ShlAssign<Simd<[isize; 4]>> for isizex4[src]

impl ShlAssign<Simd<[isize; 8]>> for isizex8[src]

impl ShlAssign<Simd<[u128; 1]>> for u128x1[src]

impl ShlAssign<Simd<[u128; 2]>> for u128x2[src]

impl ShlAssign<Simd<[u128; 4]>> for u128x4[src]

impl ShlAssign<Simd<[u16; 16]>> for u16x16[src]

impl ShlAssign<Simd<[u16; 2]>> for u16x2[src]

impl ShlAssign<Simd<[u16; 32]>> for u16x32[src]

impl ShlAssign<Simd<[u16; 4]>> for u16x4[src]

impl ShlAssign<Simd<[u16; 8]>> for u16x8[src]

impl ShlAssign<Simd<[u32; 16]>> for u32x16[src]

impl ShlAssign<Simd<[u32; 2]>> for u32x2[src]

impl ShlAssign<Simd<[u32; 4]>> for u32x4[src]

impl ShlAssign<Simd<[u32; 8]>> for u32x8[src]

impl ShlAssign<Simd<[u64; 2]>> for u64x2[src]

impl ShlAssign<Simd<[u64; 4]>> for u64x4[src]

impl ShlAssign<Simd<[u64; 8]>> for u64x8[src]

impl ShlAssign<Simd<[u8; 16]>> for u8x16[src]

impl ShlAssign<Simd<[u8; 2]>> for u8x2[src]

impl ShlAssign<Simd<[u8; 32]>> for u8x32[src]

impl ShlAssign<Simd<[u8; 4]>> for u8x4[src]

impl ShlAssign<Simd<[u8; 64]>> for u8x64[src]

impl ShlAssign<Simd<[u8; 8]>> for u8x8[src]

impl ShlAssign<Simd<[usize; 2]>> for usizex2[src]

impl ShlAssign<Simd<[usize; 4]>> for usizex4[src]

impl ShlAssign<Simd<[usize; 8]>> for usizex8[src]
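
ShlAssign applies the same per-lane shift in place:

    use packed_simd::u8x4;

    fn main() {
        let mut v = u8x4::splat(1);
        v <<= u8x4::new(0, 1, 2, 3); // per-lane in-place shift
        assert_eq!(v, u8x4::new(1, 2, 4, 8));
    }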

Every impl below has type Output = Self, the resulting type after applying the >> operator.

impl Shr<Simd<[i128; 1]>> for i128x1[src]
impl Shr<Simd<[i128; 2]>> for i128x2[src]
impl Shr<Simd<[i128; 4]>> for i128x4[src]

impl Shr<Simd<[i16; 16]>> for i16x16[src]
impl Shr<Simd<[i16; 2]>> for i16x2[src]
impl Shr<Simd<[i16; 32]>> for i16x32[src]
impl Shr<Simd<[i16; 4]>> for i16x4[src]
impl Shr<Simd<[i16; 8]>> for i16x8[src]

impl Shr<Simd<[i32; 16]>> for i32x16[src]
impl Shr<Simd<[i32; 2]>> for i32x2[src]
impl Shr<Simd<[i32; 4]>> for i32x4[src]
impl Shr<Simd<[i32; 8]>> for i32x8[src]

impl Shr<Simd<[i64; 2]>> for i64x2[src]
impl Shr<Simd<[i64; 4]>> for i64x4[src]
impl Shr<Simd<[i64; 8]>> for i64x8[src]

impl Shr<Simd<[i8; 16]>> for i8x16[src]
impl Shr<Simd<[i8; 2]>> for i8x2[src]
impl Shr<Simd<[i8; 32]>> for i8x32[src]
impl Shr<Simd<[i8; 4]>> for i8x4[src]
impl Shr<Simd<[i8; 64]>> for i8x64[src]
impl Shr<Simd<[i8; 8]>> for i8x8[src]

impl Shr<Simd<[isize; 2]>> for isizex2[src]
impl Shr<Simd<[isize; 4]>> for isizex4[src]
impl Shr<Simd<[isize; 8]>> for isizex8[src]

impl Shr<Simd<[u128; 1]>> for u128x1[src]
impl Shr<Simd<[u128; 2]>> for u128x2[src]
impl Shr<Simd<[u128; 4]>> for u128x4[src]

impl Shr<Simd<[u16; 16]>> for u16x16[src]
impl Shr<Simd<[u16; 2]>> for u16x2[src]
impl Shr<Simd<[u16; 32]>> for u16x32[src]
impl Shr<Simd<[u16; 4]>> for u16x4[src]
impl Shr<Simd<[u16; 8]>> for u16x8[src]

impl Shr<Simd<[u32; 16]>> for u32x16[src]
impl Shr<Simd<[u32; 2]>> for u32x2[src]
impl Shr<Simd<[u32; 4]>> for u32x4[src]
impl Shr<Simd<[u32; 8]>> for u32x8[src]

impl Shr<Simd<[u64; 2]>> for u64x2[src]
impl Shr<Simd<[u64; 4]>> for u64x4[src]
impl Shr<Simd<[u64; 8]>> for u64x8[src]

impl Shr<Simd<[u8; 16]>> for u8x16[src]
impl Shr<Simd<[u8; 2]>> for u8x2[src]
impl Shr<Simd<[u8; 32]>> for u8x32[src]
impl Shr<Simd<[u8; 4]>> for u8x4[src]
impl Shr<Simd<[u8; 64]>> for u8x64[src]
impl Shr<Simd<[u8; 8]>> for u8x8[src]

impl Shr<Simd<[usize; 2]>> for usizex2[src]
impl Shr<Simd<[usize; 4]>> for usizex4[src]
impl Shr<Simd<[usize; 8]>> for usizex8[src]
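
Right shifts are also per-lane and, following Rust's scalar rules, should be logical for unsigned lanes and arithmetic (sign-preserving) for signed lanes:

    use packed_simd::{i32x4, u32x4};

    fn main() {
        // Unsigned lanes shift in zeros.
        assert_eq!(u32x4::splat(8) >> u32x4::new(0, 1, 2, 3),
                   u32x4::new(8, 4, 2, 1));
        // Signed lanes keep the sign bit: -8 >> 1 == -4 in every lane.
        assert_eq!(i32x4::splat(-8) >> i32x4::splat(1), i32x4::splat(-4));
    }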

impl ShrAssign<Simd<[i128; 1]>> for i128x1[src]

impl ShrAssign<Simd<[i128; 2]>> for i128x2[src]

impl ShrAssign<Simd<[i128; 4]>> for i128x4[src]

impl ShrAssign<Simd<[i16; 16]>> for i16x16[src]

impl ShrAssign<Simd<[i16; 2]>> for i16x2[src]

impl ShrAssign<Simd<[i16; 32]>> for i16x32[src]

impl ShrAssign<Simd<[i16; 4]>> for i16x4[src]

impl ShrAssign<Simd<[i16; 8]>> for i16x8[src]

impl ShrAssign<Simd<[i32; 16]>> for i32x16[src]

impl ShrAssign<Simd<[i32; 2]>> for i32x2[src]

impl ShrAssign<Simd<[i32; 4]>> for i32x4[src]

impl ShrAssign<Simd<[i32; 8]>> for i32x8[src]

impl ShrAssign<Simd<[i64; 2]>> for i64x2[src]

impl ShrAssign<Simd<[i64; 4]>> for i64x4[src]

impl ShrAssign<Simd<[i64; 8]>> for i64x8[src]

impl ShrAssign<Simd<[i8; 16]>> for i8x16[src]

impl ShrAssign<Simd<[i8; 2]>> for i8x2[src]

impl ShrAssign<Simd<[i8; 32]>> for i8x32[src]

impl ShrAssign<Simd<[i8; 4]>> for i8x4[src]

impl ShrAssign<Simd<[i8; 64]>> for i8x64[src]

impl ShrAssign<Simd<[i8; 8]>> for i8x8[src]

impl ShrAssign<Simd<[isize; 2]>> for isizex2[src]

impl ShrAssign<Simd<[isize; 4]>> for isizex4[src]

impl ShrAssign<Simd<[isize; 8]>> for isizex8[src]

impl ShrAssign<Simd<[u128; 1]>> for u128x1[src]

impl ShrAssign<Simd<[u128; 2]>> for u128x2[src]

impl ShrAssign<Simd<[u128; 4]>> for u128x4[src]

impl ShrAssign<Simd<[u16; 16]>> for u16x16[src]

impl ShrAssign<Simd<[u16; 2]>> for u16x2[src]

impl ShrAssign<Simd<[u16; 32]>> for u16x32[src]

impl ShrAssign<Simd<[u16; 4]>> for u16x4[src]

impl ShrAssign<Simd<[u16; 8]>> for u16x8[src]

impl ShrAssign<Simd<[u32; 16]>> for u32x16[src]

impl ShrAssign<Simd<[u32; 2]>> for u32x2[src]

impl ShrAssign<Simd<[u32; 4]>> for u32x4[src]

impl ShrAssign<Simd<[u32; 8]>> for u32x8[src]

impl ShrAssign<Simd<[u64; 2]>> for u64x2[src]

impl ShrAssign<Simd<[u64; 4]>> for u64x4[src]

impl ShrAssign<Simd<[u64; 8]>> for u64x8[src]

impl ShrAssign<Simd<[u8; 16]>> for u8x16[src]

impl ShrAssign<Simd<[u8; 2]>> for u8x2[src]

impl ShrAssign<Simd<[u8; 32]>> for u8x32[src]

impl ShrAssign<Simd<[u8; 4]>> for u8x4[src]

impl ShrAssign<Simd<[u8; 64]>> for u8x64[src]

impl ShrAssign<Simd<[u8; 8]>> for u8x8[src]

impl ShrAssign<Simd<[usize; 2]>> for usizex2[src]

impl ShrAssign<Simd<[usize; 4]>> for usizex4[src]

impl ShrAssign<Simd<[usize; 8]>> for usizex8[src]
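
And ShrAssign is its in-place counterpart:

    use packed_simd::u16x8;

    fn main() {
        let mut v = u16x8::splat(64);
        v >>= u16x8::splat(3); // lane-wise in-place shift
        assert_eq!(v, u16x8::splat(8));
    }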

As with Mul and Rem, every vector-by-vector impl below has type Output = Self, and every scalar impl (e.g. impl Sub<Simd<[f32; 16]>> for f32) has the matching vector type as its Output (here f32x16); in all cases, Output is the resulting type after applying the - operator.

impl Sub<Simd<[f32; 16]>> for f32x16[src]
impl Sub<Simd<[f32; 16]>> for f32[src]
impl Sub<Simd<[f32; 2]>> for f32x2[src]
impl Sub<Simd<[f32; 2]>> for f32[src]
impl Sub<Simd<[f32; 4]>> for f32x4[src]
impl Sub<Simd<[f32; 4]>> for f32[src]
impl Sub<Simd<[f32; 8]>> for f32x8[src]
impl Sub<Simd<[f32; 8]>> for f32[src]

impl Sub<Simd<[f64; 2]>> for f64x2[src]
impl Sub<Simd<[f64; 2]>> for f64[src]
impl Sub<Simd<[f64; 4]>> for f64x4[src]
impl Sub<Simd<[f64; 4]>> for f64[src]
impl Sub<Simd<[f64; 8]>> for f64x8[src]
impl Sub<Simd<[f64; 8]>> for f64[src]

impl Sub<Simd<[i128; 1]>> for i128x1[src]
impl Sub<Simd<[i128; 1]>> for i128[src]
impl Sub<Simd<[i128; 2]>> for i128x2[src]
impl Sub<Simd<[i128; 2]>> for i128[src]
impl Sub<Simd<[i128; 4]>> for i128x4[src]
impl Sub<Simd<[i128; 4]>> for i128[src]

impl Sub<Simd<[i16; 16]>> for i16x16[src]
impl Sub<Simd<[i16; 16]>> for i16[src]
impl Sub<Simd<[i16; 2]>> for i16x2[src]
impl Sub<Simd<[i16; 2]>> for i16[src]
impl Sub<Simd<[i16; 32]>> for i16x32[src]
impl Sub<Simd<[i16; 32]>> for i16[src]
impl Sub<Simd<[i16; 4]>> for i16x4[src]
impl Sub<Simd<[i16; 4]>> for i16[src]
impl Sub<Simd<[i16; 8]>> for i16x8[src]
impl Sub<Simd<[i16; 8]>> for i16[src]

impl Sub<Simd<[i32; 16]>> for i32x16[src]
impl Sub<Simd<[i32; 16]>> for i32[src]
impl Sub<Simd<[i32; 2]>> for i32x2[src]
impl Sub<Simd<[i32; 2]>> for i32[src]
impl Sub<Simd<[i32; 4]>> for i32x4[src]
impl Sub<Simd<[i32; 4]>> for i32[src]
impl Sub<Simd<[i32; 8]>> for i32x8[src]
impl Sub<Simd<[i32; 8]>> for i32[src]

impl Sub<Simd<[i64; 2]>> for i64x2[src]
impl Sub<Simd<[i64; 2]>> for i64[src]
impl Sub<Simd<[i64; 4]>> for i64x4[src]
impl Sub<Simd<[i64; 4]>> for i64[src]
impl Sub<Simd<[i64; 8]>> for i64x8[src]
impl Sub<Simd<[i64; 8]>> for i64[src]

impl Sub<Simd<[i8; 16]>> for i8x16[src]
impl Sub<Simd<[i8; 16]>> for i8[src]
impl Sub<Simd<[i8; 2]>> for i8x2[src]
impl Sub<Simd<[i8; 2]>> for i8[src]
impl Sub<Simd<[i8; 32]>> for i8x32[src]
impl Sub<Simd<[i8; 32]>> for i8[src]
impl Sub<Simd<[i8; 4]>> for i8x4[src]
impl Sub<Simd<[i8; 4]>> for i8[src]
impl Sub<Simd<[i8; 64]>> for i8x64[src]
impl Sub<Simd<[i8; 64]>> for i8[src]
impl Sub<Simd<[i8; 8]>> for i8x8[src]

impl Sub<Simd<[i8; 8]>> for i8[src]

type Output = i8x8

The resulting type after applying the - operator.

+

impl Sub<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[isize; 2]>> for isize[src]

type Output = isizex2

The resulting type after applying the - operator.

+

impl Sub<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[isize; 4]>> for isize[src]

type Output = isizex4

The resulting type after applying the - operator.

+

impl Sub<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[isize; 8]>> for isize[src]

type Output = isizex8

The resulting type after applying the - operator.

+

impl Sub<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u128; 1]>> for u128[src]

type Output = u128x1

The resulting type after applying the - operator.

+

impl Sub<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u128; 2]>> for u128[src]

type Output = u128x2

The resulting type after applying the - operator.

+

impl Sub<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u128; 4]>> for u128[src]

type Output = u128x4

The resulting type after applying the - operator.

+

impl Sub<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u16; 16]>> for u16[src]

type Output = u16x16

The resulting type after applying the - operator.

+

impl Sub<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u16; 2]>> for u16[src]

type Output = u16x2

The resulting type after applying the - operator.

+

impl Sub<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u16; 32]>> for u16[src]

type Output = u16x32

The resulting type after applying the - operator.

+

impl Sub<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u16; 4]>> for u16[src]

type Output = u16x4

The resulting type after applying the - operator.

+

impl Sub<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u16; 8]>> for u16[src]

type Output = u16x8

The resulting type after applying the - operator.

+

impl Sub<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u32; 16]>> for u32[src]

type Output = u32x16

The resulting type after applying the - operator.

+

impl Sub<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u32; 2]>> for u32[src]

type Output = u32x2

The resulting type after applying the - operator.

+

impl Sub<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u32; 4]>> for u32[src]

type Output = u32x4

The resulting type after applying the - operator.

+

impl Sub<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u32; 8]>> for u32[src]

type Output = u32x8

The resulting type after applying the - operator.

+

impl Sub<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u64; 2]>> for u64[src]

type Output = u64x2

The resulting type after applying the - operator.

+

impl Sub<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u64; 4]>> for u64[src]

type Output = u64x4

The resulting type after applying the - operator.

+

impl Sub<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u64; 8]>> for u64[src]

type Output = u64x8

The resulting type after applying the - operator.

+

impl Sub<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u8; 16]>> for u8[src]

type Output = u8x16

The resulting type after applying the - operator.

+

impl Sub<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u8; 2]>> for u8[src]

type Output = u8x2

The resulting type after applying the - operator.

+

impl Sub<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u8; 32]>> for u8[src]

type Output = u8x32

The resulting type after applying the - operator.

+

impl Sub<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u8; 4]>> for u8[src]

type Output = u8x4

The resulting type after applying the - operator.

+

impl Sub<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u8; 64]>> for u8[src]

type Output = u8x64

The resulting type after applying the - operator.

+

impl Sub<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[u8; 8]>> for u8[src]

type Output = u8x8

The resulting type after applying the - operator.

+

impl Sub<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[usize; 2]>> for usize[src]

type Output = usizex2

The resulting type after applying the - operator.

+

impl Sub<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[usize; 4]>> for usize[src]

type Output = usizex4

The resulting type after applying the - operator.

+

impl Sub<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<Simd<[usize; 8]>> for usize[src]

type Output = usizex8

The resulting type after applying the - operator.

+
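
Taken together, these impls mean `-` works both element-wise between two vectors and between a scalar and a vector (the scalar is applied against every lane). A minimal sketch, assuming the `packed_simd` crate is available as a dependency:

```rust
use packed_simd::f32x4;

fn main() {
    let a = f32x4::new(4.0, 3.0, 2.0, 1.0);
    let b = f32x4::splat(1.0);

    // Vector - vector: impl Sub<Simd<[f32; 4]>> for f32x4 (Output = Self).
    assert_eq!(a - b, f32x4::new(3.0, 2.0, 1.0, 0.0));

    // Scalar - vector: impl Sub<Simd<[f32; 4]>> for f32 (Output = f32x4);
    // each lane i of the result is `10.0 - a.extract(i)`.
    assert_eq!(10.0f32 - a, f32x4::new(6.0, 7.0, 8.0, 9.0));
}
```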

impl SubAssign<Simd<[f32; 16]>> for f32x16[src]

impl SubAssign<Simd<[f32; 2]>> for f32x2[src]

impl SubAssign<Simd<[f32; 4]>> for f32x4[src]

impl SubAssign<Simd<[f32; 8]>> for f32x8[src]

impl SubAssign<Simd<[f64; 2]>> for f64x2[src]

impl SubAssign<Simd<[f64; 4]>> for f64x4[src]

impl SubAssign<Simd<[f64; 8]>> for f64x8[src]

impl SubAssign<Simd<[i128; 1]>> for i128x1[src]

impl SubAssign<Simd<[i128; 2]>> for i128x2[src]

impl SubAssign<Simd<[i128; 4]>> for i128x4[src]

impl SubAssign<Simd<[i16; 16]>> for i16x16[src]

impl SubAssign<Simd<[i16; 2]>> for i16x2[src]

impl SubAssign<Simd<[i16; 32]>> for i16x32[src]

impl SubAssign<Simd<[i16; 4]>> for i16x4[src]

impl SubAssign<Simd<[i16; 8]>> for i16x8[src]

impl SubAssign<Simd<[i32; 16]>> for i32x16[src]

impl SubAssign<Simd<[i32; 2]>> for i32x2[src]

impl SubAssign<Simd<[i32; 4]>> for i32x4[src]

impl SubAssign<Simd<[i32; 8]>> for i32x8[src]

impl SubAssign<Simd<[i64; 2]>> for i64x2[src]

impl SubAssign<Simd<[i64; 4]>> for i64x4[src]

impl SubAssign<Simd<[i64; 8]>> for i64x8[src]

impl SubAssign<Simd<[i8; 16]>> for i8x16[src]

impl SubAssign<Simd<[i8; 2]>> for i8x2[src]

impl SubAssign<Simd<[i8; 32]>> for i8x32[src]

impl SubAssign<Simd<[i8; 4]>> for i8x4[src]

impl SubAssign<Simd<[i8; 64]>> for i8x64[src]

impl SubAssign<Simd<[i8; 8]>> for i8x8[src]

impl SubAssign<Simd<[isize; 2]>> for isizex2[src]

impl SubAssign<Simd<[isize; 4]>> for isizex4[src]

impl SubAssign<Simd<[isize; 8]>> for isizex8[src]

impl SubAssign<Simd<[u128; 1]>> for u128x1[src]

impl SubAssign<Simd<[u128; 2]>> for u128x2[src]

impl SubAssign<Simd<[u128; 4]>> for u128x4[src]

impl SubAssign<Simd<[u16; 16]>> for u16x16[src]

impl SubAssign<Simd<[u16; 2]>> for u16x2[src]

impl SubAssign<Simd<[u16; 32]>> for u16x32[src]

impl SubAssign<Simd<[u16; 4]>> for u16x4[src]

impl SubAssign<Simd<[u16; 8]>> for u16x8[src]

impl SubAssign<Simd<[u32; 16]>> for u32x16[src]

impl SubAssign<Simd<[u32; 2]>> for u32x2[src]

impl SubAssign<Simd<[u32; 4]>> for u32x4[src]

impl SubAssign<Simd<[u32; 8]>> for u32x8[src]

impl SubAssign<Simd<[u64; 2]>> for u64x2[src]

impl SubAssign<Simd<[u64; 4]>> for u64x4[src]

impl SubAssign<Simd<[u64; 8]>> for u64x8[src]

impl SubAssign<Simd<[u8; 16]>> for u8x16[src]

impl SubAssign<Simd<[u8; 2]>> for u8x2[src]

impl SubAssign<Simd<[u8; 32]>> for u8x32[src]

impl SubAssign<Simd<[u8; 4]>> for u8x4[src]

impl SubAssign<Simd<[u8; 64]>> for u8x64[src]

impl SubAssign<Simd<[u8; 8]>> for u8x8[src]

impl SubAssign<Simd<[usize; 2]>> for usizex2[src]

impl SubAssign<Simd<[usize; 4]>> for usizex4[src]

impl SubAssign<Simd<[usize; 8]>> for usizex8[src]
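
The `SubAssign` impls provide the in-place form of the same element-wise subtraction. A short sketch, again assuming `packed_simd` as a dependency:

```rust
use packed_simd::i32x4;

fn main() {
    let mut v = i32x4::new(5, 6, 7, 8);
    // impl SubAssign<Simd<[i32; 4]>> for i32x4: element-wise `-=`.
    v -= i32x4::splat(5);
    assert_eq!(v, i32x4::new(0, 1, 2, 3));
}
```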

impl<'a> Sum<&'a Simd<[f32; 16]>> for f32x16[src]

impl<'a> Sum<&'a Simd<[f32; 2]>> for f32x2[src]

impl<'a> Sum<&'a Simd<[f32; 4]>> for f32x4[src]

impl<'a> Sum<&'a Simd<[f32; 8]>> for f32x8[src]

impl<'a> Sum<&'a Simd<[f64; 2]>> for f64x2[src]

impl<'a> Sum<&'a Simd<[f64; 4]>> for f64x4[src]

impl<'a> Sum<&'a Simd<[f64; 8]>> for f64x8[src]

impl<'a> Sum<&'a Simd<[i128; 1]>> for i128x1[src]

impl<'a> Sum<&'a Simd<[i128; 2]>> for i128x2[src]

impl<'a> Sum<&'a Simd<[i128; 4]>> for i128x4[src]

impl<'a> Sum<&'a Simd<[i16; 16]>> for i16x16[src]

impl<'a> Sum<&'a Simd<[i16; 2]>> for i16x2[src]

impl<'a> Sum<&'a Simd<[i16; 32]>> for i16x32[src]

impl<'a> Sum<&'a Simd<[i16; 4]>> for i16x4[src]

impl<'a> Sum<&'a Simd<[i16; 8]>> for i16x8[src]

impl<'a> Sum<&'a Simd<[i32; 16]>> for i32x16[src]

impl<'a> Sum<&'a Simd<[i32; 2]>> for i32x2[src]

impl<'a> Sum<&'a Simd<[i32; 4]>> for i32x4[src]

impl<'a> Sum<&'a Simd<[i32; 8]>> for i32x8[src]

impl<'a> Sum<&'a Simd<[i64; 2]>> for i64x2[src]

impl<'a> Sum<&'a Simd<[i64; 4]>> for i64x4[src]

impl<'a> Sum<&'a Simd<[i64; 8]>> for i64x8[src]

impl<'a> Sum<&'a Simd<[i8; 16]>> for i8x16[src]

impl<'a> Sum<&'a Simd<[i8; 2]>> for i8x2[src]

impl<'a> Sum<&'a Simd<[i8; 32]>> for i8x32[src]

impl<'a> Sum<&'a Simd<[i8; 4]>> for i8x4[src]

impl<'a> Sum<&'a Simd<[i8; 64]>> for i8x64[src]

impl<'a> Sum<&'a Simd<[i8; 8]>> for i8x8[src]

impl<'a> Sum<&'a Simd<[isize; 2]>> for isizex2[src]

impl<'a> Sum<&'a Simd<[isize; 4]>> for isizex4[src]

impl<'a> Sum<&'a Simd<[isize; 8]>> for isizex8[src]

impl<'a> Sum<&'a Simd<[u128; 1]>> for u128x1[src]

impl<'a> Sum<&'a Simd<[u128; 2]>> for u128x2[src]

impl<'a> Sum<&'a Simd<[u128; 4]>> for u128x4[src]

impl<'a> Sum<&'a Simd<[u16; 16]>> for u16x16[src]

impl<'a> Sum<&'a Simd<[u16; 2]>> for u16x2[src]

impl<'a> Sum<&'a Simd<[u16; 32]>> for u16x32[src]

impl<'a> Sum<&'a Simd<[u16; 4]>> for u16x4[src]

impl<'a> Sum<&'a Simd<[u16; 8]>> for u16x8[src]

impl<'a> Sum<&'a Simd<[u32; 16]>> for u32x16[src]

impl<'a> Sum<&'a Simd<[u32; 2]>> for u32x2[src]

impl<'a> Sum<&'a Simd<[u32; 4]>> for u32x4[src]

impl<'a> Sum<&'a Simd<[u32; 8]>> for u32x8[src]

impl<'a> Sum<&'a Simd<[u64; 2]>> for u64x2[src]

impl<'a> Sum<&'a Simd<[u64; 4]>> for u64x4[src]

impl<'a> Sum<&'a Simd<[u64; 8]>> for u64x8[src]

impl<'a> Sum<&'a Simd<[u8; 16]>> for u8x16[src]

impl<'a> Sum<&'a Simd<[u8; 2]>> for u8x2[src]

impl<'a> Sum<&'a Simd<[u8; 32]>> for u8x32[src]

impl<'a> Sum<&'a Simd<[u8; 4]>> for u8x4[src]

impl<'a> Sum<&'a Simd<[u8; 64]>> for u8x64[src]

impl<'a> Sum<&'a Simd<[u8; 8]>> for u8x8[src]

impl<'a> Sum<&'a Simd<[usize; 2]>> for usizex2[src]

impl<'a> Sum<&'a Simd<[usize; 4]>> for usizex4[src]

impl<'a> Sum<&'a Simd<[usize; 8]>> for usizex8[src]

impl Sum<Simd<[f32; 16]>> for f32x16[src]

impl Sum<Simd<[f32; 2]>> for f32x2[src]

impl Sum<Simd<[f32; 4]>> for f32x4[src]

impl Sum<Simd<[f32; 8]>> for f32x8[src]

impl Sum<Simd<[f64; 2]>> for f64x2[src]

impl Sum<Simd<[f64; 4]>> for f64x4[src]

impl Sum<Simd<[f64; 8]>> for f64x8[src]

impl Sum<Simd<[i128; 1]>> for i128x1[src]

impl Sum<Simd<[i128; 2]>> for i128x2[src]

impl Sum<Simd<[i128; 4]>> for i128x4[src]

impl Sum<Simd<[i16; 16]>> for i16x16[src]

impl Sum<Simd<[i16; 2]>> for i16x2[src]

impl Sum<Simd<[i16; 32]>> for i16x32[src]

impl Sum<Simd<[i16; 4]>> for i16x4[src]

impl Sum<Simd<[i16; 8]>> for i16x8[src]

impl Sum<Simd<[i32; 16]>> for i32x16[src]

impl Sum<Simd<[i32; 2]>> for i32x2[src]

impl Sum<Simd<[i32; 4]>> for i32x4[src]

impl Sum<Simd<[i32; 8]>> for i32x8[src]

impl Sum<Simd<[i64; 2]>> for i64x2[src]

impl Sum<Simd<[i64; 4]>> for i64x4[src]

impl Sum<Simd<[i64; 8]>> for i64x8[src]

impl Sum<Simd<[i8; 16]>> for i8x16[src]

impl Sum<Simd<[i8; 2]>> for i8x2[src]

impl Sum<Simd<[i8; 32]>> for i8x32[src]

impl Sum<Simd<[i8; 4]>> for i8x4[src]

impl Sum<Simd<[i8; 64]>> for i8x64[src]

impl Sum<Simd<[i8; 8]>> for i8x8[src]

impl Sum<Simd<[isize; 2]>> for isizex2[src]

impl Sum<Simd<[isize; 4]>> for isizex4[src]

impl Sum<Simd<[isize; 8]>> for isizex8[src]

impl Sum<Simd<[u128; 1]>> for u128x1[src]

impl Sum<Simd<[u128; 2]>> for u128x2[src]

impl Sum<Simd<[u128; 4]>> for u128x4[src]

impl Sum<Simd<[u16; 16]>> for u16x16[src]

impl Sum<Simd<[u16; 2]>> for u16x2[src]

impl Sum<Simd<[u16; 32]>> for u16x32[src]

impl Sum<Simd<[u16; 4]>> for u16x4[src]

impl Sum<Simd<[u16; 8]>> for u16x8[src]

impl Sum<Simd<[u32; 16]>> for u32x16[src]

impl Sum<Simd<[u32; 2]>> for u32x2[src]

impl Sum<Simd<[u32; 4]>> for u32x4[src]

impl Sum<Simd<[u32; 8]>> for u32x8[src]

impl Sum<Simd<[u64; 2]>> for u64x2[src]

impl Sum<Simd<[u64; 4]>> for u64x4[src]

impl Sum<Simd<[u64; 8]>> for u64x8[src]

impl Sum<Simd<[u8; 16]>> for u8x16[src]

impl Sum<Simd<[u8; 2]>> for u8x2[src]

impl Sum<Simd<[u8; 32]>> for u8x32[src]

impl Sum<Simd<[u8; 4]>> for u8x4[src]

impl Sum<Simd<[u8; 64]>> for u8x64[src]

impl Sum<Simd<[u8; 8]>> for u8x8[src]

impl Sum<Simd<[usize; 2]>> for usizex2[src]

impl Sum<Simd<[usize; 4]>> for usizex4[src]

impl Sum<Simd<[usize; 8]>> for usizex8[src]
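
The `Sum` impls (by value and, via the `&'a` variants, by reference) let an iterator of vectors be folded lane-wise into a single accumulator vector. A minimal sketch, assuming `packed_simd` as a dependency; the final horizontal reduction uses the crate's `sum` method on the vector itself:

```rust
use packed_simd::f32x4;

fn main() {
    let chunks = [f32x4::splat(1.0), f32x4::new(0.0, 1.0, 2.0, 3.0)];

    // impl<'a> Sum<&'a Simd<[f32; 4]>> for f32x4: lane-wise accumulation.
    let acc: f32x4 = chunks.iter().sum();
    assert_eq!(acc, f32x4::new(1.0, 2.0, 3.0, 4.0));

    // A horizontal add of the accumulator then yields a single scalar.
    assert_eq!(acc.sum(), 10.0);
}
```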

Auto Trait Implementations

impl<A> Send for Simd<A> where
    <A as SimdArray>::Tuple: Send

impl<A> Sync for Simd<A> where
    <A as SimdArray>::Tuple: Sync

impl<A> Unpin for Simd<A> where
    <A as SimdArray>::Tuple: Unpin

Blanket Implementations

impl<T> Any for T where
    T: 'static + ?Sized
[src]

impl<T> Borrow<T> for T where
    T: ?Sized
[src]

impl<T> BorrowMut<T> for T where
    T: ?Sized
[src]

impl<T> From<T> for T[src]

impl<T, U> Into<U> for T where
    U: From<T>, 
[src]

impl<T, U> TryFrom<U> for T where
    U: Into<T>, 
[src]

type Error = Infallible

The type returned in the event of a conversion error.

impl<T, U> TryInto<U> for T where
    U: TryFrom<T>,
[src]

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
\ No newline at end of file diff --git a/packed_simd/struct.m128.html b/packed_simd/struct.m128.html new file mode 100644 index 000000000..f836438aa --- /dev/null +++ b/packed_simd/struct.m128.html @@ -0,0 +1,30 @@ +packed_simd::m128 - Rust

Struct packed_simd::m128[src]

pub struct m128(_);

128-bit wide mask.

Implementations

impl m128[src]

pub fn new(x: bool) -> Self[src]

Instantiate a mask with value `x`.

pub fn test(&self) -> bool[src]

Test if the mask is set.
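
A minimal usage sketch of these two methods (the same shape applies to the other mask types on the following pages), assuming `packed_simd` is a dependency:

```rust
use packed_simd::m128;

fn main() {
    let t = m128::new(true);  // pub fn new(x: bool) -> Self
    let f = m128::new(false);
    assert!(t.test());        // pub fn test(&self) -> bool
    assert!(!f.test());
}
```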

Trait Implementations

impl Clone for m128[src]

impl Copy for m128[src]

impl Debug for m128[src]

impl Default for m128[src]

impl Eq for m128[src]

impl Hash for m128[src]

impl Mask for m128[src]

impl Ord for m128[src]

impl PartialEq<m128> for m128[src]

impl PartialOrd<m128> for m128[src]

Auto Trait Implementations

impl Send for m128

impl Sync for m128

impl Unpin for m128

Blanket Implementations

impl<T> Any for T where
    T: 'static + ?Sized
[src]

impl<T> Borrow<T> for T where
    T: ?Sized
[src]

impl<T> BorrowMut<T> for T where
    T: ?Sized
[src]

impl<T> From<T> for T[src]

impl<T, U> Into<U> for T where
    U: From<T>, 
[src]

impl<T, U> TryFrom<U> for T where
    U: Into<T>, 
[src]

type Error = Infallible

The type returned in the event of a conversion error.

impl<T, U> TryInto<U> for T where
    U: TryFrom<T>,
[src]

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
\ No newline at end of file diff --git a/packed_simd/struct.m16.html b/packed_simd/struct.m16.html new file mode 100644 index 000000000..e45ef81ee --- /dev/null +++ b/packed_simd/struct.m16.html @@ -0,0 +1,30 @@ +packed_simd::m16 - Rust

Struct packed_simd::m16[src]

pub struct m16(_);

16-bit wide mask.

Implementations

impl m16[src]

pub fn new(x: bool) -> Self[src]

Instantiate a mask with value `x`.

pub fn test(&self) -> bool[src]

Test if the mask is set.

Trait Implementations

impl Clone for m16[src]

impl Copy for m16[src]

impl Debug for m16[src]

impl Default for m16[src]

impl Eq for m16[src]

impl Hash for m16[src]

impl Mask for m16[src]

impl Ord for m16[src]

impl PartialEq<m16> for m16[src]

impl PartialOrd<m16> for m16[src]

Auto Trait Implementations

impl Send for m16

impl Sync for m16

impl Unpin for m16

Blanket Implementations

impl<T> Any for T where
    T: 'static + ?Sized
[src]

impl<T> Borrow<T> for T where
    T: ?Sized
[src]

impl<T> BorrowMut<T> for T where
    T: ?Sized
[src]

impl<T> From<T> for T[src]

impl<T, U> Into<U> for T where
    U: From<T>, 
[src]

impl<T, U> TryFrom<U> for T where
    U: Into<T>, 
[src]

type Error = Infallible

The type returned in the event of a conversion error.

impl<T, U> TryInto<U> for T where
    U: TryFrom<T>,
[src]

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
\ No newline at end of file diff --git a/packed_simd/struct.m32.html b/packed_simd/struct.m32.html new file mode 100644 index 000000000..3276f8906 --- /dev/null +++ b/packed_simd/struct.m32.html @@ -0,0 +1,30 @@ +packed_simd::m32 - Rust

Struct packed_simd::m32[src]

pub struct m32(_);

32-bit wide mask.

Implementations

impl m32[src]

pub fn new(x: bool) -> Self[src]

Instantiate a mask with value `x`.

pub fn test(&self) -> bool[src]

Test if the mask is set.

Trait Implementations

impl Clone for m32[src]

impl Copy for m32[src]

impl Debug for m32[src]

impl Default for m32[src]

impl Eq for m32[src]

impl Hash for m32[src]

impl Mask for m32[src]

impl Ord for m32[src]

impl PartialEq<m32> for m32[src]

impl PartialOrd<m32> for m32[src]

Auto Trait Implementations

impl Send for m32

impl Sync for m32

impl Unpin for m32

Blanket Implementations

impl<T> Any for T where
    T: 'static + ?Sized
[src]

impl<T> Borrow<T> for T where
    T: ?Sized
[src]

impl<T> BorrowMut<T> for T where
    T: ?Sized
[src]

impl<T> From<T> for T[src]

impl<T, U> Into<U> for T where
    U: From<T>, 
[src]

impl<T, U> TryFrom<U> for T where
    U: Into<T>, 
[src]

type Error = Infallible

The type returned in the event of a conversion error.

impl<T, U> TryInto<U> for T where
    U: TryFrom<T>,
[src]

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
\ No newline at end of file diff --git a/packed_simd/struct.m64.html b/packed_simd/struct.m64.html new file mode 100644 index 000000000..5c81abf47 --- /dev/null +++ b/packed_simd/struct.m64.html @@ -0,0 +1,30 @@ +packed_simd::m64 - Rust

Struct packed_simd::m64[src]

pub struct m64(_);

64-bit wide mask.

Implementations

impl m64[src]

pub fn new(x: bool) -> Self[src]

Instantiate a mask with value `x`.

pub fn test(&self) -> bool[src]

Test if the mask is set.

Trait Implementations

impl Clone for m64[src]

impl Copy for m64[src]

impl Debug for m64[src]

impl Default for m64[src]

impl Eq for m64[src]

impl Hash for m64[src]

impl Mask for m64[src]

impl Ord for m64[src]

impl PartialEq<m64> for m64[src]

impl PartialOrd<m64> for m64[src]

Auto Trait Implementations

impl Send for m64

impl Sync for m64

impl Unpin for m64

Blanket Implementations

impl<T> Any for T where
    T: 'static + ?Sized
[src]

impl<T> Borrow<T> for T where
    T: ?Sized
[src]

impl<T> BorrowMut<T> for T where
    T: ?Sized
[src]

impl<T> From<T> for T[src]

impl<T, U> Into<U> for T where
    U: From<T>, 
[src]

impl<T, U> TryFrom<U> for T where
    U: Into<T>, 
[src]

type Error = Infallible

The type returned in the event of a conversion error.

impl<T, U> TryInto<U> for T where
    U: TryFrom<T>,
[src]

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
\ No newline at end of file diff --git a/packed_simd/struct.m8.html b/packed_simd/struct.m8.html new file mode 100644 index 000000000..fbf4fd8b4 --- /dev/null +++ b/packed_simd/struct.m8.html @@ -0,0 +1,30 @@ +packed_simd::m8 - Rust

Struct packed_simd::m8[src]

pub struct m8(_);

8-bit wide mask.

Implementations

impl m8[src]

pub fn new(x: bool) -> Self[src]

Instantiate a mask with value `x`.

pub fn test(&self) -> bool[src]

Test if the mask is set.

Trait Implementations

impl Clone for m8[src]

impl Copy for m8[src]

impl Debug for m8[src]

impl Default for m8[src]

impl Eq for m8[src]

impl Hash for m8[src]

impl Mask for m8[src]

impl Ord for m8[src]

impl PartialEq<m8> for m8[src]

impl PartialOrd<m8> for m8[src]

Auto Trait Implementations

impl Send for m8

impl Sync for m8

impl Unpin for m8

Blanket Implementations

impl<T> Any for T where
    T: 'static + ?Sized
[src]

impl<T> Borrow<T> for T where
    T: ?Sized
[src]

impl<T> BorrowMut<T> for T where
    T: ?Sized
[src]

impl<T> From<T> for T[src]

impl<T, U> Into<U> for T where
    U: From<T>, 
[src]

impl<T, U> TryFrom<U> for T where
    U: Into<T>, 
[src]

type Error = Infallible

The type returned in the event of a conversion error.

impl<T, U> TryInto<U> for T where
    U: TryFrom<T>,
[src]

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
\ No newline at end of file diff --git a/packed_simd/struct.msize.html b/packed_simd/struct.msize.html new file mode 100644 index 000000000..4c99b6227 --- /dev/null +++ b/packed_simd/struct.msize.html @@ -0,0 +1,30 @@ +packed_simd::msize - Rust

Struct packed_simd::msize[src]

pub struct msize(_);

isize-wide mask.

Implementations

impl msize[src]

pub fn new(x: bool) -> Self[src]

Instantiate a mask with value `x`.

pub fn test(&self) -> bool[src]

Test if the mask is set.

Trait Implementations

impl Clone for msize[src]

impl Copy for msize[src]

impl Debug for msize[src]

impl Default for msize[src]

impl Eq for msize[src]

impl Hash for msize[src]

impl Mask for msize[src]

impl Ord for msize[src]

impl PartialEq<msize> for msize[src]

impl PartialOrd<msize> for msize[src]

Auto Trait Implementations

impl Send for msize

impl Sync for msize

impl Unpin for msize

Blanket Implementations

impl<T> Any for T where
    T: 'static + ?Sized
[src]

impl<T> Borrow<T> for T where
    T: ?Sized
[src]

impl<T> BorrowMut<T> for T where
    T: ?Sized
[src]

impl<T> From<T> for T[src]

impl<T, U> Into<U> for T where
    U: From<T>, 
[src]

impl<T, U> TryFrom<U> for T where
    U: Into<T>, 
[src]

type Error = Infallible

The type returned in the event of a conversion error.

impl<T, U> TryInto<U> for T where
    U: TryFrom<T>,
[src]

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
\ No newline at end of file diff --git a/packed_simd/trait.Cast.html b/packed_simd/trait.Cast.html new file mode 100644 index 000000000..d83fe4ab9 --- /dev/null +++ b/packed_simd/trait.Cast.html @@ -0,0 +1,44 @@ +packed_simd::Cast - Rust

Trait packed_simd::Cast[src]

pub trait Cast<T>: Sized {
    fn cast(self) -> T;
}

Numeric cast from Self to T.

Note: This is a temporary workaround until the conversion traits
specified in RFC2484 are implemented.

Numeric cast between vectors with the same number of lanes, such that:

  • casting integer vectors whose lane types have the same size (e.g.
    i32xN -> u32xN) is a no-op,

  • casting from a larger integer to a smaller integer (e.g. u32xN ->
    u8xN) will truncate,

  • casting from a smaller integer to a larger integer (e.g. u8xN ->
    u32xN) will:

      • zero-extend if the source is unsigned, or
      • sign-extend if the source is signed,

  • casting from a float to an integer will round the float towards zero,

  • casting from an integer to float will produce the floating point
    representation of the integer, rounding to nearest, ties to even,

  • casting from an f32 to an f64 is perfect and lossless,

  • casting from an f64 to an f32 rounds to nearest, ties to even.
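
A short sketch of two of these rules, assuming `packed_simd` is a dependency; both casts keep the lane count (4) and change only the lane type:

```rust
use packed_simd::{f32x4, i32x4, u8x4, Cast};

fn main() {
    // Larger -> smaller integer truncates each lane (257 wraps to 1,
    // -1 to 255).
    let wide = i32x4::new(1, 2, 257, -1);
    let narrow: u8x4 = wide.cast();
    assert_eq!(narrow, u8x4::new(1, 2, 1, 255));

    // Float -> integer rounds each lane towards zero.
    let f = f32x4::new(1.9, -1.9, 0.5, -0.5);
    let i: i32x4 = f.cast();
    assert_eq!(i, i32x4::new(1, -1, 0, 0));
}
```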

Required methods

fn cast(self) -> T

Numeric cast from self to T.


Implementors

impl<T, U> Cast<U> for T where
    U: FromCast<T>, 
[src]

FromCast implies Cast.

\ No newline at end of file diff --git a/packed_simd/trait.FromBits.html b/packed_simd/trait.FromBits.html new file mode 100644 index 000000000..1c5e67053 --- /dev/null +++ b/packed_simd/trait.FromBits.html @@ -0,0 +1,9 @@ +packed_simd::FromBits - Rust

Trait packed_simd::FromBits[src]

pub trait FromBits<T>: Sized {
    fn from_bits(t: T) -> Self;
}

Safe lossless bitwise conversion from T to Self.


Required methods

fn from_bits(t: T) -> Self

Safe lossless bitwise transmute from T to Self.


Implementations on Foreign Types

impl FromBits<Simd<[i8; 8]>> for __m64[src]

impl FromBits<Simd<[u8; 8]>> for __m64[src]

impl FromBits<Simd<[m8; 8]>> for __m64[src]

impl FromBits<Simd<[i16; 4]>> for __m64[src]

impl FromBits<Simd<[u16; 4]>> for __m64[src]

impl FromBits<Simd<[m16; 4]>> for __m64[src]

impl FromBits<Simd<[i32; 2]>> for __m64[src]

impl FromBits<Simd<[u32; 2]>> for __m64[src]

impl FromBits<Simd<[f32; 2]>> for __m64[src]

impl FromBits<Simd<[m32; 2]>> for __m64[src]

impl FromBits<Simd<[i8; 16]>> for __m128[src]

impl FromBits<Simd<[u8; 16]>> for __m128[src]

impl FromBits<Simd<[m8; 16]>> for __m128[src]

impl FromBits<Simd<[i16; 8]>> for __m128[src]

impl FromBits<Simd<[u16; 8]>> for __m128[src]

impl FromBits<Simd<[m16; 8]>> for __m128[src]

impl FromBits<Simd<[i32; 4]>> for __m128[src]

impl FromBits<Simd<[u32; 4]>> for __m128[src]

impl FromBits<Simd<[f32; 4]>> for __m128[src]

impl FromBits<Simd<[m32; 4]>> for __m128[src]

impl FromBits<Simd<[i64; 2]>> for __m128[src]

impl FromBits<Simd<[u64; 2]>> for __m128[src]

impl FromBits<Simd<[f64; 2]>> for __m128[src]

impl FromBits<Simd<[m64; 2]>> for __m128[src]

impl FromBits<Simd<[i128; 1]>> for __m128[src]

impl FromBits<Simd<[u128; 1]>> for __m128[src]

impl FromBits<Simd<[m128; 1]>> for __m128[src]

impl FromBits<Simd<[i8; 16]>> for __m128i[src]

impl FromBits<Simd<[u8; 16]>> for __m128i[src]

impl FromBits<Simd<[m8; 16]>> for __m128i[src]

impl FromBits<Simd<[i16; 8]>> for __m128i[src]

impl FromBits<Simd<[u16; 8]>> for __m128i[src]

impl FromBits<Simd<[m16; 8]>> for __m128i[src]

impl FromBits<Simd<[i32; 4]>> for __m128i[src]

impl FromBits<Simd<[u32; 4]>> for __m128i[src]

impl FromBits<Simd<[f32; 4]>> for __m128i[src]

impl FromBits<Simd<[m32; 4]>> for __m128i[src]

impl FromBits<Simd<[i64; 2]>> for __m128i[src]

impl FromBits<Simd<[u64; 2]>> for __m128i[src]

impl FromBits<Simd<[f64; 2]>> for __m128i[src]

impl FromBits<Simd<[m64; 2]>> for __m128i[src]

impl FromBits<Simd<[i128; 1]>> for __m128i[src]

impl FromBits<Simd<[u128; 1]>> for __m128i[src]

impl FromBits<Simd<[m128; 1]>> for __m128i[src]

impl FromBits<Simd<[i8; 16]>> for __m128d[src]

impl FromBits<Simd<[u8; 16]>> for __m128d[src]

impl FromBits<Simd<[m8; 16]>> for __m128d[src]

impl FromBits<Simd<[i16; 8]>> for __m128d[src]

impl FromBits<Simd<[u16; 8]>> for __m128d[src]

impl FromBits<Simd<[m16; 8]>> for __m128d[src]

impl FromBits<Simd<[i32; 4]>> for __m128d[src]

impl FromBits<Simd<[u32; 4]>> for __m128d[src]

impl FromBits<Simd<[f32; 4]>> for __m128d[src]

impl FromBits<Simd<[m32; 4]>> for __m128d[src]

impl FromBits<Simd<[i64; 2]>> for __m128d[src]

impl FromBits<Simd<[u64; 2]>> for __m128d[src]

impl FromBits<Simd<[f64; 2]>> for __m128d[src]

impl FromBits<Simd<[m64; 2]>> for __m128d[src]

impl FromBits<Simd<[i128; 1]>> for __m128d[src]

impl FromBits<Simd<[u128; 1]>> for __m128d[src]

impl FromBits<Simd<[m128; 1]>> for __m128d[src]

impl FromBits<Simd<[i8; 32]>> for __m256[src]

impl FromBits<Simd<[u8; 32]>> for __m256[src]

impl FromBits<Simd<[m8; 32]>> for __m256[src]

impl FromBits<Simd<[i16; 16]>> for __m256[src]

impl FromBits<Simd<[u16; 16]>> for __m256[src]

impl FromBits<Simd<[m16; 16]>> for __m256[src]

impl FromBits<Simd<[i32; 8]>> for __m256[src]

impl FromBits<Simd<[u32; 8]>> for __m256[src]

impl FromBits<Simd<[f32; 8]>> for __m256[src]

impl FromBits<Simd<[m32; 8]>> for __m256[src]

impl FromBits<Simd<[i64; 4]>> for __m256[src]

impl FromBits<Simd<[u64; 4]>> for __m256[src]

impl FromBits<Simd<[f64; 4]>> for __m256[src]

impl FromBits<Simd<[m64; 4]>> for __m256[src]

impl FromBits<Simd<[i128; 2]>> for __m256[src]

impl FromBits<Simd<[u128; 2]>> for __m256[src]

impl FromBits<Simd<[m128; 2]>> for __m256[src]

impl FromBits<Simd<[i8; 32]>> for __m256i[src]

impl FromBits<Simd<[u8; 32]>> for __m256i[src]

impl FromBits<Simd<[m8; 32]>> for __m256i[src]

impl FromBits<Simd<[i16; 16]>> for __m256i[src]

impl FromBits<Simd<[u16; 16]>> for __m256i[src]

impl FromBits<Simd<[m16; 16]>> for __m256i[src]

impl FromBits<Simd<[i32; 8]>> for __m256i[src]

impl FromBits<Simd<[u32; 8]>> for __m256i[src]

impl FromBits<Simd<[f32; 8]>> for __m256i[src]

impl FromBits<Simd<[m32; 8]>> for __m256i[src]

impl FromBits<Simd<[i64; 4]>> for __m256i[src]

impl FromBits<Simd<[u64; 4]>> for __m256i[src]

impl FromBits<Simd<[f64; 4]>> for __m256i[src]

impl FromBits<Simd<[m64; 4]>> for __m256i[src]

impl FromBits<Simd<[i128; 2]>> for __m256i[src]

impl FromBits<Simd<[u128; 2]>> for __m256i[src]

impl FromBits<Simd<[m128; 2]>> for __m256i[src]

impl FromBits<Simd<[i8; 32]>> for __m256d[src]

impl FromBits<Simd<[u8; 32]>> for __m256d[src]

impl FromBits<Simd<[m8; 32]>> for __m256d[src]

impl FromBits<Simd<[i16; 16]>> for __m256d[src]

impl FromBits<Simd<[u16; 16]>> for __m256d[src]

impl FromBits<Simd<[m16; 16]>> for __m256d[src]

impl FromBits<Simd<[i32; 8]>> for __m256d[src]

impl FromBits<Simd<[u32; 8]>> for __m256d[src]

impl FromBits<Simd<[f32; 8]>> for __m256d[src]

impl FromBits<Simd<[m32; 8]>> for __m256d[src]

impl FromBits<Simd<[i64; 4]>> for __m256d[src]

impl FromBits<Simd<[u64; 4]>> for __m256d[src]

impl FromBits<Simd<[f64; 4]>> for __m256d[src]

impl FromBits<Simd<[m64; 4]>> for __m256d[src]

impl FromBits<Simd<[i128; 2]>> for __m256d[src]

impl FromBits<Simd<[u128; 2]>> for __m256d[src]

impl FromBits<Simd<[m128; 2]>> for __m256d[src]


Implementors

impl FromBits<Simd<[m128; 1]>> for f32x4[src]

impl FromBits<Simd<[m128; 1]>> for f64x2[src]

impl FromBits<Simd<[m128; 1]>> for i128x1[src]

impl FromBits<Simd<[m128; 1]>> for i16x8[src]

impl FromBits<Simd<[m128; 1]>> for i32x4[src]

impl FromBits<Simd<[m128; 1]>> for i64x2[src]

impl FromBits<Simd<[m128; 1]>> for i8x16[src]

impl FromBits<Simd<[m128; 1]>> for m16x8[src]

impl FromBits<Simd<[m128; 1]>> for m32x4[src]

impl FromBits<Simd<[m128; 1]>> for m64x2[src]

impl FromBits<Simd<[m128; 1]>> for m8x16[src]

impl FromBits<Simd<[m128; 1]>> for u128x1[src]

impl FromBits<Simd<[m128; 1]>> for u16x8[src]

impl FromBits<Simd<[m128; 1]>> for u32x4[src]

impl FromBits<Simd<[m128; 1]>> for u64x2[src]

impl FromBits<Simd<[m128; 1]>> for u8x16[src]

impl FromBits<Simd<[m128; 2]>> for f32x8[src]

impl FromBits<Simd<[m128; 2]>> for f64x4[src]

impl FromBits<Simd<[m128; 2]>> for i128x2[src]

impl FromBits<Simd<[m128; 2]>> for i16x16[src]

impl FromBits<Simd<[m128; 2]>> for i32x8[src]

impl FromBits<Simd<[m128; 2]>> for i64x4[src]

impl FromBits<Simd<[m128; 2]>> for i8x32[src]

impl FromBits<Simd<[m128; 2]>> for m16x16[src]

impl FromBits<Simd<[m128; 2]>> for m32x8[src]

impl FromBits<Simd<[m128; 2]>> for m64x4[src]

impl FromBits<Simd<[m128; 2]>> for m8x32[src]

impl FromBits<Simd<[m128; 2]>> for u128x2[src]

impl FromBits<Simd<[m128; 2]>> for u16x16[src]

impl FromBits<Simd<[m128; 2]>> for u32x8[src]

impl FromBits<Simd<[m128; 2]>> for u64x4[src]

impl FromBits<Simd<[m128; 2]>> for u8x32[src]

impl FromBits<Simd<[m128; 4]>> for f32x16[src]

impl FromBits<Simd<[m128; 4]>> for f64x8[src]

impl FromBits<Simd<[m128; 4]>> for i128x4[src]

impl FromBits<Simd<[m128; 4]>> for i16x32[src]

impl FromBits<Simd<[m128; 4]>> for i32x16[src]

impl FromBits<Simd<[m128; 4]>> for i64x8[src]

impl FromBits<Simd<[m128; 4]>> for i8x64[src]

impl FromBits<Simd<[m128; 4]>> for m16x32[src]

impl FromBits<Simd<[m128; 4]>> for m32x16[src]

impl FromBits<Simd<[m128; 4]>> for m64x8[src]

impl FromBits<Simd<[m128; 4]>> for m8x64[src]

impl FromBits<Simd<[m128; 4]>> for u128x4[src]

impl FromBits<Simd<[m128; 4]>> for u16x32[src]

impl FromBits<Simd<[m128; 4]>> for u32x16[src]

impl FromBits<Simd<[m128; 4]>> for u64x8[src]

impl FromBits<Simd<[m128; 4]>> for u8x64[src]

impl FromBits<Simd<[m16; 16]>> for f32x8[src]

impl FromBits<Simd<[m16; 16]>> for f64x4[src]

impl FromBits<Simd<[m16; 16]>> for i128x2[src]

impl FromBits<Simd<[m16; 16]>> for i16x16[src]

impl FromBits<Simd<[m16; 16]>> for i32x8[src]

impl FromBits<Simd<[m16; 16]>> for i64x4[src]

impl FromBits<Simd<[m16; 16]>> for i8x32[src]

impl FromBits<Simd<[m16; 16]>> for m8x32[src]

impl FromBits<Simd<[m16; 16]>> for u128x2[src]

impl FromBits<Simd<[m16; 16]>> for u16x16[src]

impl FromBits<Simd<[m16; 16]>> for u32x8[src]

impl FromBits<Simd<[m16; 16]>> for u64x4[src]

impl FromBits<Simd<[m16; 16]>> for u8x32[src]

impl FromBits<Simd<[m16; 2]>> for i16x2[src]

impl FromBits<Simd<[m16; 2]>> for i8x4[src]

impl FromBits<Simd<[m16; 2]>> for m8x4[src]

impl FromBits<Simd<[m16; 2]>> for u16x2[src]

impl FromBits<Simd<[m16; 2]>> for u8x4[src]

impl FromBits<Simd<[m16; 32]>> for f32x16[src]

impl FromBits<Simd<[m16; 32]>> for f64x8[src]

impl FromBits<Simd<[m16; 32]>> for i128x4[src]

impl FromBits<Simd<[m16; 32]>> for i16x32[src]

impl FromBits<Simd<[m16; 32]>> for i32x16[src]

impl FromBits<Simd<[m16; 32]>> for i64x8[src]

impl FromBits<Simd<[m16; 32]>> for i8x64[src]

impl FromBits<Simd<[m16; 32]>> for m8x64[src]

impl FromBits<Simd<[m16; 32]>> for u128x4[src]

impl FromBits<Simd<[m16; 32]>> for u16x32[src]

impl FromBits<Simd<[m16; 32]>> for u32x16[src]

impl FromBits<Simd<[m16; 32]>> for u64x8[src]

impl FromBits<Simd<[m16; 32]>> for u8x64[src]

impl FromBits<Simd<[m16; 4]>> for f32x2[src]

impl FromBits<Simd<[m16; 4]>> for i16x4[src]

impl FromBits<Simd<[m16; 4]>> for i32x2[src]

impl FromBits<Simd<[m16; 4]>> for i8x8[src]

impl FromBits<Simd<[m16; 4]>> for m8x8[src]

impl FromBits<Simd<[m16; 4]>> for u16x4[src]

impl FromBits<Simd<[m16; 4]>> for u32x2[src]

impl FromBits<Simd<[m16; 4]>> for u8x8[src]

impl FromBits<Simd<[m16; 8]>> for f32x4[src]

impl FromBits<Simd<[m16; 8]>> for f64x2[src]

impl FromBits<Simd<[m16; 8]>> for i128x1[src]

impl FromBits<Simd<[m16; 8]>> for i16x8[src]

impl FromBits<Simd<[m16; 8]>> for i32x4[src]

impl FromBits<Simd<[m16; 8]>> for i64x2[src]

impl FromBits<Simd<[m16; 8]>> for i8x16[src]

impl FromBits<Simd<[m16; 8]>> for m8x16[src]

impl FromBits<Simd<[m16; 8]>> for u128x1[src]

impl FromBits<Simd<[m16; 8]>> for u16x8[src]

impl FromBits<Simd<[m16; 8]>> for u32x4[src]

impl FromBits<Simd<[m16; 8]>> for u64x2[src]

impl FromBits<Simd<[m16; 8]>> for u8x16[src]

impl FromBits<Simd<[m32; 16]>> for f32x16[src]

impl FromBits<Simd<[m32; 16]>> for f64x8[src]

impl FromBits<Simd<[m32; 16]>> for i128x4[src]

impl FromBits<Simd<[m32; 16]>> for i16x32[src]

impl FromBits<Simd<[m32; 16]>> for i32x16[src]

impl FromBits<Simd<[m32; 16]>> for i64x8[src]

impl FromBits<Simd<[m32; 16]>> for i8x64[src]

impl FromBits<Simd<[m32; 16]>> for m16x32[src]

impl FromBits<Simd<[m32; 16]>> for m8x64[src]

impl FromBits<Simd<[m32; 16]>> for u128x4[src]

impl FromBits<Simd<[m32; 16]>> for u16x32[src]

impl FromBits<Simd<[m32; 16]>> for u32x16[src]

impl FromBits<Simd<[m32; 16]>> for u64x8[src]

impl FromBits<Simd<[m32; 16]>> for u8x64[src]

impl FromBits<Simd<[m32; 2]>> for f32x2[src]

impl FromBits<Simd<[m32; 2]>> for i16x4[src]

impl FromBits<Simd<[m32; 2]>> for i32x2[src]

impl FromBits<Simd<[m32; 2]>> for i8x8[src]

impl FromBits<Simd<[m32; 2]>> for m16x4[src]

impl FromBits<Simd<[m32; 2]>> for m8x8[src]

impl FromBits<Simd<[m32; 2]>> for u16x4[src]

impl FromBits<Simd<[m32; 2]>> for u32x2[src]

impl FromBits<Simd<[m32; 2]>> for u8x8[src]

impl FromBits<Simd<[m32; 4]>> for f32x4[src]

impl FromBits<Simd<[m32; 4]>> for f64x2[src]

impl FromBits<Simd<[m32; 4]>> for i128x1[src]

impl FromBits<Simd<[m32; 4]>> for i16x8[src]

impl FromBits<Simd<[m32; 4]>> for i32x4[src]

impl FromBits<Simd<[m32; 4]>> for i64x2[src]

impl FromBits<Simd<[m32; 4]>> for i8x16[src]

impl FromBits<Simd<[m32; 4]>> for m16x8[src]

impl FromBits<Simd<[m32; 4]>> for m8x16[src]

impl FromBits<Simd<[m32; 4]>> for u128x1[src]

impl FromBits<Simd<[m32; 4]>> for u16x8[src]

impl FromBits<Simd<[m32; 4]>> for u32x4[src]

impl FromBits<Simd<[m32; 4]>> for u64x2[src]

impl FromBits<Simd<[m32; 4]>> for u8x16[src]

impl FromBits<Simd<[m32; 8]>> for f32x8[src]

impl FromBits<Simd<[m32; 8]>> for f64x4[src]

impl FromBits<Simd<[m32; 8]>> for i128x2[src]

impl FromBits<Simd<[m32; 8]>> for i16x16[src]

impl FromBits<Simd<[m32; 8]>> for i32x8[src]

impl FromBits<Simd<[m32; 8]>> for i64x4[src]

impl FromBits<Simd<[m32; 8]>> for i8x32[src]

impl FromBits<Simd<[m32; 8]>> for m16x16[src]

impl FromBits<Simd<[m32; 8]>> for m8x32[src]

impl FromBits<Simd<[m32; 8]>> for u128x2[src]

impl FromBits<Simd<[m32; 8]>> for u16x16[src]

impl FromBits<Simd<[m32; 8]>> for u32x8[src]

impl FromBits<Simd<[m32; 8]>> for u64x4[src]

impl FromBits<Simd<[m32; 8]>> for u8x32[src]

impl FromBits<Simd<[m64; 2]>> for f32x4[src]

impl FromBits<Simd<[m64; 2]>> for f64x2[src]

impl FromBits<Simd<[m64; 2]>> for i128x1[src]

impl FromBits<Simd<[m64; 2]>> for i16x8[src]

impl FromBits<Simd<[m64; 2]>> for i32x4[src]

impl FromBits<Simd<[m64; 2]>> for i64x2[src]

impl FromBits<Simd<[m64; 2]>> for i8x16[src]

impl FromBits<Simd<[m64; 2]>> for m16x8[src]

impl FromBits<Simd<[m64; 2]>> for m32x4[src]

impl FromBits<Simd<[m64; 2]>> for m8x16[src]

impl FromBits<Simd<[m64; 2]>> for u128x1[src]

impl FromBits<Simd<[m64; 2]>> for u16x8[src]

impl FromBits<Simd<[m64; 2]>> for u32x4[src]

impl FromBits<Simd<[m64; 2]>> for u64x2[src]

impl FromBits<Simd<[m64; 2]>> for u8x16[src]

impl FromBits<Simd<[m64; 4]>> for f32x8[src]

impl FromBits<Simd<[m64; 4]>> for f64x4[src]

impl FromBits<Simd<[m64; 4]>> for i128x2[src]

impl FromBits<Simd<[m64; 4]>> for i16x16[src]

impl FromBits<Simd<[m64; 4]>> for i32x8[src]

impl FromBits<Simd<[m64; 4]>> for i64x4[src]

impl FromBits<Simd<[m64; 4]>> for i8x32[src]

impl FromBits<Simd<[m64; 4]>> for m16x16[src]

impl FromBits<Simd<[m64; 4]>> for m32x8[src]

impl FromBits<Simd<[m64; 4]>> for m8x32[src]

impl FromBits<Simd<[m64; 4]>> for u128x2[src]

impl FromBits<Simd<[m64; 4]>> for u16x16[src]

impl FromBits<Simd<[m64; 4]>> for u32x8[src]

impl FromBits<Simd<[m64; 4]>> for u64x4[src]

impl FromBits<Simd<[m64; 4]>> for u8x32[src]

impl FromBits<Simd<[m64; 8]>> for f32x16[src]

impl FromBits<Simd<[m64; 8]>> for f64x8[src]

impl FromBits<Simd<[m64; 8]>> for i128x4[src]

impl FromBits<Simd<[m64; 8]>> for i16x32[src]

impl FromBits<Simd<[m64; 8]>> for i32x16[src]

impl FromBits<Simd<[m64; 8]>> for i64x8[src]

impl FromBits<Simd<[m64; 8]>> for i8x64[src]

impl FromBits<Simd<[m64; 8]>> for m16x32[src]

impl FromBits<Simd<[m64; 8]>> for m32x16[src]

impl FromBits<Simd<[m64; 8]>> for m8x64[src]

impl FromBits<Simd<[m64; 8]>> for u128x4[src]

impl FromBits<Simd<[m64; 8]>> for u16x32[src]

impl FromBits<Simd<[m64; 8]>> for u32x16[src]

impl FromBits<Simd<[m64; 8]>> for u64x8[src]

impl FromBits<Simd<[m64; 8]>> for u8x64[src]

impl FromBits<Simd<[m8; 16]>> for f32x4[src]

impl FromBits<Simd<[m8; 16]>> for f64x2[src]

impl FromBits<Simd<[m8; 16]>> for i128x1[src]

impl FromBits<Simd<[m8; 16]>> for i16x8[src]

impl FromBits<Simd<[m8; 16]>> for i32x4[src]

impl FromBits<Simd<[m8; 16]>> for i64x2[src]

impl FromBits<Simd<[m8; 16]>> for i8x16[src]

impl FromBits<Simd<[m8; 16]>> for u128x1[src]

impl FromBits<Simd<[m8; 16]>> for u16x8[src]

impl FromBits<Simd<[m8; 16]>> for u32x4[src]

impl FromBits<Simd<[m8; 16]>> for u64x2[src]

impl FromBits<Simd<[m8; 16]>> for u8x16[src]

impl FromBits<Simd<[m8; 2]>> for i8x2[src]

impl FromBits<Simd<[m8; 2]>> for u8x2[src]

impl FromBits<Simd<[m8; 32]>> for f32x8[src]

impl FromBits<Simd<[m8; 32]>> for f64x4[src]

impl FromBits<Simd<[m8; 32]>> for i128x2[src]

impl FromBits<Simd<[m8; 32]>> for i16x16[src]

impl FromBits<Simd<[m8; 32]>> for i32x8[src]

impl FromBits<Simd<[m8; 32]>> for i64x4[src]

impl FromBits<Simd<[m8; 32]>> for i8x32[src]

impl FromBits<Simd<[m8; 32]>> for u128x2[src]

impl FromBits<Simd<[m8; 32]>> for u16x16[src]

impl FromBits<Simd<[m8; 32]>> for u32x8[src]

impl FromBits<Simd<[m8; 32]>> for u64x4[src]

impl FromBits<Simd<[m8; 32]>> for u8x32[src]

impl FromBits<Simd<[m8; 4]>> for i16x2[src]

impl FromBits<Simd<[m8; 4]>> for i8x4[src]

impl FromBits<Simd<[m8; 4]>> for u16x2[src]

impl FromBits<Simd<[m8; 4]>> for u8x4[src]

impl FromBits<Simd<[m8; 64]>> for f32x16[src]

impl FromBits<Simd<[m8; 64]>> for f64x8[src]

impl FromBits<Simd<[m8; 64]>> for i128x4[src]

impl FromBits<Simd<[m8; 64]>> for i16x32[src]

impl FromBits<Simd<[m8; 64]>> for i32x16[src]

impl FromBits<Simd<[m8; 64]>> for i64x8[src]

impl FromBits<Simd<[m8; 64]>> for i8x64[src]

impl FromBits<Simd<[m8; 64]>> for u128x4[src]

impl FromBits<Simd<[m8; 64]>> for u16x32[src]

impl FromBits<Simd<[m8; 64]>> for u32x16[src]

impl FromBits<Simd<[m8; 64]>> for u64x8[src]

impl FromBits<Simd<[m8; 64]>> for u8x64[src]

impl FromBits<Simd<[m8; 8]>> for f32x2[src]

impl FromBits<Simd<[m8; 8]>> for i16x4[src]

impl FromBits<Simd<[m8; 8]>> for i32x2[src]

impl FromBits<Simd<[m8; 8]>> for i8x8[src]

impl FromBits<Simd<[m8; 8]>> for u16x4[src]

impl FromBits<Simd<[m8; 8]>> for u32x2[src]

impl FromBits<Simd<[m8; 8]>> for u8x8[src]

impl FromBits<Simd<[f32; 16]>> for f64x8[src]

impl FromBits<Simd<[f32; 16]>> for i128x4[src]

impl FromBits<Simd<[f32; 16]>> for i16x32[src]

impl FromBits<Simd<[f32; 16]>> for i32x16[src]

impl FromBits<Simd<[f32; 16]>> for i64x8[src]

impl FromBits<Simd<[f32; 16]>> for i8x64[src]

impl FromBits<Simd<[f32; 16]>> for u128x4[src]

impl FromBits<Simd<[f32; 16]>> for u16x32[src]

impl FromBits<Simd<[f32; 16]>> for u32x16[src]

impl FromBits<Simd<[f32; 16]>> for u64x8[src]

impl FromBits<Simd<[f32; 16]>> for u8x64[src]

impl FromBits<Simd<[f32; 2]>> for i16x4[src]

impl FromBits<Simd<[f32; 2]>> for i32x2[src]

impl FromBits<Simd<[f32; 2]>> for i8x8[src]

impl FromBits<Simd<[f32; 2]>> for u16x4[src]

impl FromBits<Simd<[f32; 2]>> for u32x2[src]

impl FromBits<Simd<[f32; 2]>> for u8x8[src]

impl FromBits<Simd<[f32; 4]>> for f64x2[src]

impl FromBits<Simd<[f32; 4]>> for i128x1[src]

impl FromBits<Simd<[f32; 4]>> for i16x8[src]

impl FromBits<Simd<[f32; 4]>> for i32x4[src]

impl FromBits<Simd<[f32; 4]>> for i64x2[src]

impl FromBits<Simd<[f32; 4]>> for i8x16[src]

impl FromBits<Simd<[f32; 4]>> for u128x1[src]

impl FromBits<Simd<[f32; 4]>> for u16x8[src]

impl FromBits<Simd<[f32; 4]>> for u32x4[src]

impl FromBits<Simd<[f32; 4]>> for u64x2[src]

impl FromBits<Simd<[f32; 4]>> for u8x16[src]

impl FromBits<Simd<[f32; 8]>> for f64x4[src]

impl FromBits<Simd<[f32; 8]>> for i128x2[src]

impl FromBits<Simd<[f32; 8]>> for i16x16[src]

impl FromBits<Simd<[f32; 8]>> for i32x8[src]

impl FromBits<Simd<[f32; 8]>> for i64x4[src]

impl FromBits<Simd<[f32; 8]>> for i8x32[src]

impl FromBits<Simd<[f32; 8]>> for u128x2[src]

impl FromBits<Simd<[f32; 8]>> for u16x16[src]

impl FromBits<Simd<[f32; 8]>> for u32x8[src]

impl FromBits<Simd<[f32; 8]>> for u64x4[src]

impl FromBits<Simd<[f32; 8]>> for u8x32[src]

impl FromBits<Simd<[f64; 2]>> for f32x4[src]

impl FromBits<Simd<[f64; 2]>> for i128x1[src]

impl FromBits<Simd<[f64; 2]>> for i16x8[src]

impl FromBits<Simd<[f64; 2]>> for i32x4[src]

impl FromBits<Simd<[f64; 2]>> for i64x2[src]

impl FromBits<Simd<[f64; 2]>> for i8x16[src]

impl FromBits<Simd<[f64; 2]>> for u128x1[src]

impl FromBits<Simd<[f64; 2]>> for u16x8[src]

impl FromBits<Simd<[f64; 2]>> for u32x4[src]

impl FromBits<Simd<[f64; 2]>> for u64x2[src]

impl FromBits<Simd<[f64; 2]>> for u8x16[src]

impl FromBits<Simd<[f64; 4]>> for f32x8[src]

impl FromBits<Simd<[f64; 4]>> for i128x2[src]

impl FromBits<Simd<[f64; 4]>> for i16x16[src]

impl FromBits<Simd<[f64; 4]>> for i32x8[src]

impl FromBits<Simd<[f64; 4]>> for i64x4[src]

impl FromBits<Simd<[f64; 4]>> for i8x32[src]

impl FromBits<Simd<[f64; 4]>> for u128x2[src]

impl FromBits<Simd<[f64; 4]>> for u16x16[src]

impl FromBits<Simd<[f64; 4]>> for u32x8[src]

impl FromBits<Simd<[f64; 4]>> for u64x4[src]

impl FromBits<Simd<[f64; 4]>> for u8x32[src]

impl FromBits<Simd<[f64; 8]>> for f32x16[src]

impl FromBits<Simd<[f64; 8]>> for i128x4[src]

impl FromBits<Simd<[f64; 8]>> for i16x32[src]

impl FromBits<Simd<[f64; 8]>> for i32x16[src]

impl FromBits<Simd<[f64; 8]>> for i64x8[src]

impl FromBits<Simd<[f64; 8]>> for i8x64[src]

impl FromBits<Simd<[f64; 8]>> for u128x4[src]

impl FromBits<Simd<[f64; 8]>> for u16x32[src]

impl FromBits<Simd<[f64; 8]>> for u32x16[src]

impl FromBits<Simd<[f64; 8]>> for u64x8[src]

impl FromBits<Simd<[f64; 8]>> for u8x64[src]

impl FromBits<Simd<[i128; 1]>> for f32x4[src]

impl FromBits<Simd<[i128; 1]>> for f64x2[src]

impl FromBits<Simd<[i128; 1]>> for i16x8[src]

impl FromBits<Simd<[i128; 1]>> for i32x4[src]

impl FromBits<Simd<[i128; 1]>> for i64x2[src]

impl FromBits<Simd<[i128; 1]>> for i8x16[src]

impl FromBits<Simd<[i128; 1]>> for u128x1[src]

impl FromBits<Simd<[i128; 1]>> for u16x8[src]

impl FromBits<Simd<[i128; 1]>> for u32x4[src]

impl FromBits<Simd<[i128; 1]>> for u64x2[src]

impl FromBits<Simd<[i128; 1]>> for u8x16[src]

impl FromBits<Simd<[i128; 2]>> for f32x8[src]

impl FromBits<Simd<[i128; 2]>> for f64x4[src]

impl FromBits<Simd<[i128; 2]>> for i16x16[src]

impl FromBits<Simd<[i128; 2]>> for i32x8[src]

impl FromBits<Simd<[i128; 2]>> for i64x4[src]

impl FromBits<Simd<[i128; 2]>> for i8x32[src]

impl FromBits<Simd<[i128; 2]>> for u128x2[src]

impl FromBits<Simd<[i128; 2]>> for u16x16[src]

impl FromBits<Simd<[i128; 2]>> for u32x8[src]

impl FromBits<Simd<[i128; 2]>> for u64x4[src]

impl FromBits<Simd<[i128; 2]>> for u8x32[src]

impl FromBits<Simd<[i128; 4]>> for f32x16[src]

impl FromBits<Simd<[i128; 4]>> for f64x8[src]

impl FromBits<Simd<[i128; 4]>> for i16x32[src]

impl FromBits<Simd<[i128; 4]>> for i32x16[src]

impl FromBits<Simd<[i128; 4]>> for i64x8[src]

impl FromBits<Simd<[i128; 4]>> for i8x64[src]

impl FromBits<Simd<[i128; 4]>> for u128x4[src]

impl FromBits<Simd<[i128; 4]>> for u16x32[src]

impl FromBits<Simd<[i128; 4]>> for u32x16[src]

impl FromBits<Simd<[i128; 4]>> for u64x8[src]

impl FromBits<Simd<[i128; 4]>> for u8x64[src]

impl FromBits<Simd<[i16; 16]>> for f32x8[src]

impl FromBits<Simd<[i16; 16]>> for f64x4[src]

impl FromBits<Simd<[i16; 16]>> for i128x2[src]

impl FromBits<Simd<[i16; 16]>> for i32x8[src]

impl FromBits<Simd<[i16; 16]>> for i64x4[src]

impl FromBits<Simd<[i16; 16]>> for i8x32[src]

impl FromBits<Simd<[i16; 16]>> for u128x2[src]

impl FromBits<Simd<[i16; 16]>> for u16x16[src]

impl FromBits<Simd<[i16; 16]>> for u32x8[src]

impl FromBits<Simd<[i16; 16]>> for u64x4[src]

impl FromBits<Simd<[i16; 16]>> for u8x32[src]

impl FromBits<Simd<[i16; 2]>> for i8x4[src]

impl FromBits<Simd<[i16; 2]>> for u16x2[src]

impl FromBits<Simd<[i16; 2]>> for u8x4[src]

impl FromBits<Simd<[i16; 32]>> for f32x16[src]

impl FromBits<Simd<[i16; 32]>> for f64x8[src]

impl FromBits<Simd<[i16; 32]>> for i128x4[src]

impl FromBits<Simd<[i16; 32]>> for i32x16[src]

impl FromBits<Simd<[i16; 32]>> for i64x8[src]

impl FromBits<Simd<[i16; 32]>> for i8x64[src]

impl FromBits<Simd<[i16; 32]>> for u128x4[src]

impl FromBits<Simd<[i16; 32]>> for u16x32[src]

impl FromBits<Simd<[i16; 32]>> for u32x16[src]

impl FromBits<Simd<[i16; 32]>> for u64x8[src]

impl FromBits<Simd<[i16; 32]>> for u8x64[src]

impl FromBits<Simd<[i16; 4]>> for f32x2[src]

impl FromBits<Simd<[i16; 4]>> for i32x2[src]

impl FromBits<Simd<[i16; 4]>> for i8x8[src]

impl FromBits<Simd<[i16; 4]>> for u16x4[src]

impl FromBits<Simd<[i16; 4]>> for u32x2[src]

impl FromBits<Simd<[i16; 4]>> for u8x8[src]

impl FromBits<Simd<[i16; 8]>> for f32x4[src]

impl FromBits<Simd<[i16; 8]>> for f64x2[src]

impl FromBits<Simd<[i16; 8]>> for i128x1[src]

impl FromBits<Simd<[i16; 8]>> for i32x4[src]

impl FromBits<Simd<[i16; 8]>> for i64x2[src]

impl FromBits<Simd<[i16; 8]>> for i8x16[src]

impl FromBits<Simd<[i16; 8]>> for u128x1[src]

impl FromBits<Simd<[i16; 8]>> for u16x8[src]

impl FromBits<Simd<[i16; 8]>> for u32x4[src]

impl FromBits<Simd<[i16; 8]>> for u64x2[src]

impl FromBits<Simd<[i16; 8]>> for u8x16[src]

impl FromBits<Simd<[i32; 16]>> for f32x16[src]

impl FromBits<Simd<[i32; 16]>> for f64x8[src]

impl FromBits<Simd<[i32; 16]>> for i128x4[src]

impl FromBits<Simd<[i32; 16]>> for i16x32[src]

impl FromBits<Simd<[i32; 16]>> for i64x8[src]

impl FromBits<Simd<[i32; 16]>> for i8x64[src]

impl FromBits<Simd<[i32; 16]>> for u128x4[src]

impl FromBits<Simd<[i32; 16]>> for u16x32[src]

impl FromBits<Simd<[i32; 16]>> for u32x16[src]

impl FromBits<Simd<[i32; 16]>> for u64x8[src]

impl FromBits<Simd<[i32; 16]>> for u8x64[src]

impl FromBits<Simd<[i32; 2]>> for f32x2[src]

impl FromBits<Simd<[i32; 2]>> for i16x4[src]

impl FromBits<Simd<[i32; 2]>> for i8x8[src]

impl FromBits<Simd<[i32; 2]>> for u16x4[src]

impl FromBits<Simd<[i32; 2]>> for u32x2[src]

impl FromBits<Simd<[i32; 2]>> for u8x8[src]

impl FromBits<Simd<[i32; 4]>> for f32x4[src]

impl FromBits<Simd<[i32; 4]>> for f64x2[src]

impl FromBits<Simd<[i32; 4]>> for i128x1[src]

impl FromBits<Simd<[i32; 4]>> for i16x8[src]

impl FromBits<Simd<[i32; 4]>> for i64x2[src]

impl FromBits<Simd<[i32; 4]>> for i8x16[src]

impl FromBits<Simd<[i32; 4]>> for u128x1[src]

impl FromBits<Simd<[i32; 4]>> for u16x8[src]

impl FromBits<Simd<[i32; 4]>> for u32x4[src]

impl FromBits<Simd<[i32; 4]>> for u64x2[src]

impl FromBits<Simd<[i32; 4]>> for u8x16[src]

impl FromBits<Simd<[i32; 8]>> for f32x8[src]

impl FromBits<Simd<[i32; 8]>> for f64x4[src]

impl FromBits<Simd<[i32; 8]>> for i128x2[src]

impl FromBits<Simd<[i32; 8]>> for i16x16[src]

impl FromBits<Simd<[i32; 8]>> for i64x4[src]

impl FromBits<Simd<[i32; 8]>> for i8x32[src]

impl FromBits<Simd<[i32; 8]>> for u128x2[src]

impl FromBits<Simd<[i32; 8]>> for u16x16[src]

impl FromBits<Simd<[i32; 8]>> for u32x8[src]

impl FromBits<Simd<[i32; 8]>> for u64x4[src]

impl FromBits<Simd<[i32; 8]>> for u8x32[src]

impl FromBits<Simd<[i64; 2]>> for f32x4[src]

impl FromBits<Simd<[i64; 2]>> for f64x2[src]

impl FromBits<Simd<[i64; 2]>> for i128x1[src]

impl FromBits<Simd<[i64; 2]>> for i16x8[src]

impl FromBits<Simd<[i64; 2]>> for i32x4[src]

impl FromBits<Simd<[i64; 2]>> for i8x16[src]

impl FromBits<Simd<[i64; 2]>> for u128x1[src]

impl FromBits<Simd<[i64; 2]>> for u16x8[src]

impl FromBits<Simd<[i64; 2]>> for u32x4[src]

impl FromBits<Simd<[i64; 2]>> for u64x2[src]

impl FromBits<Simd<[i64; 2]>> for u8x16[src]

impl FromBits<Simd<[i64; 4]>> for f32x8[src]

impl FromBits<Simd<[i64; 4]>> for f64x4[src]

impl FromBits<Simd<[i64; 4]>> for i128x2[src]

impl FromBits<Simd<[i64; 4]>> for i16x16[src]

impl FromBits<Simd<[i64; 4]>> for i32x8[src]

impl FromBits<Simd<[i64; 4]>> for i8x32[src]

impl FromBits<Simd<[i64; 4]>> for u128x2[src]

impl FromBits<Simd<[i64; 4]>> for u16x16[src]

impl FromBits<Simd<[i64; 4]>> for u32x8[src]

impl FromBits<Simd<[i64; 4]>> for u64x4[src]

impl FromBits<Simd<[i64; 4]>> for u8x32[src]

impl FromBits<Simd<[i64; 8]>> for f32x16[src]

impl FromBits<Simd<[i64; 8]>> for f64x8[src]

impl FromBits<Simd<[i64; 8]>> for i128x4[src]

impl FromBits<Simd<[i64; 8]>> for i16x32[src]

impl FromBits<Simd<[i64; 8]>> for i32x16[src]

impl FromBits<Simd<[i64; 8]>> for i8x64[src]

impl FromBits<Simd<[i64; 8]>> for u128x4[src]

impl FromBits<Simd<[i64; 8]>> for u16x32[src]

impl FromBits<Simd<[i64; 8]>> for u32x16[src]

impl FromBits<Simd<[i64; 8]>> for u64x8[src]

impl FromBits<Simd<[i64; 8]>> for u8x64[src]

impl FromBits<Simd<[i8; 16]>> for f32x4[src]

impl FromBits<Simd<[i8; 16]>> for f64x2[src]

impl FromBits<Simd<[i8; 16]>> for i128x1[src]

impl FromBits<Simd<[i8; 16]>> for i16x8[src]

impl FromBits<Simd<[i8; 16]>> for i32x4[src]

impl FromBits<Simd<[i8; 16]>> for i64x2[src]

impl FromBits<Simd<[i8; 16]>> for u128x1[src]

impl FromBits<Simd<[i8; 16]>> for u16x8[src]

impl FromBits<Simd<[i8; 16]>> for u32x4[src]

impl FromBits<Simd<[i8; 16]>> for u64x2[src]

impl FromBits<Simd<[i8; 16]>> for u8x16[src]

impl FromBits<Simd<[i8; 2]>> for u8x2[src]

impl FromBits<Simd<[i8; 32]>> for f32x8[src]

impl FromBits<Simd<[i8; 32]>> for f64x4[src]

impl FromBits<Simd<[i8; 32]>> for i128x2[src]

impl FromBits<Simd<[i8; 32]>> for i16x16[src]

impl FromBits<Simd<[i8; 32]>> for i32x8[src]

impl FromBits<Simd<[i8; 32]>> for i64x4[src]

impl FromBits<Simd<[i8; 32]>> for u128x2[src]

impl FromBits<Simd<[i8; 32]>> for u16x16[src]

impl FromBits<Simd<[i8; 32]>> for u32x8[src]

impl FromBits<Simd<[i8; 32]>> for u64x4[src]

impl FromBits<Simd<[i8; 32]>> for u8x32[src]

impl FromBits<Simd<[i8; 4]>> for i16x2[src]

impl FromBits<Simd<[i8; 4]>> for u16x2[src]

impl FromBits<Simd<[i8; 4]>> for u8x4[src]

impl FromBits<Simd<[i8; 64]>> for f32x16[src]

impl FromBits<Simd<[i8; 64]>> for f64x8[src]

impl FromBits<Simd<[i8; 64]>> for i128x4[src]

impl FromBits<Simd<[i8; 64]>> for i16x32[src]

impl FromBits<Simd<[i8; 64]>> for i32x16[src]

impl FromBits<Simd<[i8; 64]>> for i64x8[src]

impl FromBits<Simd<[i8; 64]>> for u128x4[src]

impl FromBits<Simd<[i8; 64]>> for u16x32[src]

impl FromBits<Simd<[i8; 64]>> for u32x16[src]

impl FromBits<Simd<[i8; 64]>> for u64x8[src]

impl FromBits<Simd<[i8; 64]>> for u8x64[src]

impl FromBits<Simd<[i8; 8]>> for f32x2[src]

impl FromBits<Simd<[i8; 8]>> for i16x4[src]

impl FromBits<Simd<[i8; 8]>> for i32x2[src]

impl FromBits<Simd<[i8; 8]>> for u16x4[src]

impl FromBits<Simd<[i8; 8]>> for u32x2[src]

impl FromBits<Simd<[i8; 8]>> for u8x8[src]

impl FromBits<Simd<[u128; 1]>> for f32x4[src]

impl FromBits<Simd<[u128; 1]>> for f64x2[src]

impl FromBits<Simd<[u128; 1]>> for i128x1[src]

impl FromBits<Simd<[u128; 1]>> for i16x8[src]

impl FromBits<Simd<[u128; 1]>> for i32x4[src]

impl FromBits<Simd<[u128; 1]>> for i64x2[src]

impl FromBits<Simd<[u128; 1]>> for i8x16[src]

impl FromBits<Simd<[u128; 1]>> for u16x8[src]

impl FromBits<Simd<[u128; 1]>> for u32x4[src]

impl FromBits<Simd<[u128; 1]>> for u64x2[src]

impl FromBits<Simd<[u128; 1]>> for u8x16[src]

impl FromBits<Simd<[u128; 2]>> for f32x8[src]

impl FromBits<Simd<[u128; 2]>> for f64x4[src]

impl FromBits<Simd<[u128; 2]>> for i128x2[src]

impl FromBits<Simd<[u128; 2]>> for i16x16[src]

impl FromBits<Simd<[u128; 2]>> for i32x8[src]

impl FromBits<Simd<[u128; 2]>> for i64x4[src]

impl FromBits<Simd<[u128; 2]>> for i8x32[src]

impl FromBits<Simd<[u128; 2]>> for u16x16[src]

impl FromBits<Simd<[u128; 2]>> for u32x8[src]

impl FromBits<Simd<[u128; 2]>> for u64x4[src]

impl FromBits<Simd<[u128; 2]>> for u8x32[src]

impl FromBits<Simd<[u128; 4]>> for f32x16[src]

impl FromBits<Simd<[u128; 4]>> for f64x8[src]

impl FromBits<Simd<[u128; 4]>> for i128x4[src]

impl FromBits<Simd<[u128; 4]>> for i16x32[src]

impl FromBits<Simd<[u128; 4]>> for i32x16[src]

impl FromBits<Simd<[u128; 4]>> for i64x8[src]

impl FromBits<Simd<[u128; 4]>> for i8x64[src]

impl FromBits<Simd<[u128; 4]>> for u16x32[src]

impl FromBits<Simd<[u128; 4]>> for u32x16[src]

impl FromBits<Simd<[u128; 4]>> for u64x8[src]

impl FromBits<Simd<[u128; 4]>> for u8x64[src]

impl FromBits<Simd<[u16; 16]>> for f32x8[src]

impl FromBits<Simd<[u16; 16]>> for f64x4[src]

impl FromBits<Simd<[u16; 16]>> for i128x2[src]

impl FromBits<Simd<[u16; 16]>> for i16x16[src]

impl FromBits<Simd<[u16; 16]>> for i32x8[src]

impl FromBits<Simd<[u16; 16]>> for i64x4[src]

impl FromBits<Simd<[u16; 16]>> for i8x32[src]

impl FromBits<Simd<[u16; 16]>> for u128x2[src]

impl FromBits<Simd<[u16; 16]>> for u32x8[src]

impl FromBits<Simd<[u16; 16]>> for u64x4[src]

impl FromBits<Simd<[u16; 16]>> for u8x32[src]

impl FromBits<Simd<[u16; 2]>> for i16x2[src]

impl FromBits<Simd<[u16; 2]>> for i8x4[src]

impl FromBits<Simd<[u16; 2]>> for u8x4[src]

impl FromBits<Simd<[u16; 32]>> for f32x16[src]

impl FromBits<Simd<[u16; 32]>> for f64x8[src]

impl FromBits<Simd<[u16; 32]>> for i128x4[src]

impl FromBits<Simd<[u16; 32]>> for i16x32[src]

impl FromBits<Simd<[u16; 32]>> for i32x16[src]

impl FromBits<Simd<[u16; 32]>> for i64x8[src]

impl FromBits<Simd<[u16; 32]>> for i8x64[src]

impl FromBits<Simd<[u16; 32]>> for u128x4[src]

impl FromBits<Simd<[u16; 32]>> for u32x16[src]

impl FromBits<Simd<[u16; 32]>> for u64x8[src]

impl FromBits<Simd<[u16; 32]>> for u8x64[src]

impl FromBits<Simd<[u16; 4]>> for f32x2[src]

impl FromBits<Simd<[u16; 4]>> for i16x4[src]

impl FromBits<Simd<[u16; 4]>> for i32x2[src]

impl FromBits<Simd<[u16; 4]>> for i8x8[src]

impl FromBits<Simd<[u16; 4]>> for u32x2[src]

impl FromBits<Simd<[u16; 4]>> for u8x8[src]

impl FromBits<Simd<[u16; 8]>> for f32x4[src]

impl FromBits<Simd<[u16; 8]>> for f64x2[src]

impl FromBits<Simd<[u16; 8]>> for i128x1[src]

impl FromBits<Simd<[u16; 8]>> for i16x8[src]

impl FromBits<Simd<[u16; 8]>> for i32x4[src]

impl FromBits<Simd<[u16; 8]>> for i64x2[src]

impl FromBits<Simd<[u16; 8]>> for i8x16[src]

impl FromBits<Simd<[u16; 8]>> for u128x1[src]

impl FromBits<Simd<[u16; 8]>> for u32x4[src]

impl FromBits<Simd<[u16; 8]>> for u64x2[src]

impl FromBits<Simd<[u16; 8]>> for u8x16[src]

impl FromBits<Simd<[u32; 16]>> for f32x16[src]

impl FromBits<Simd<[u32; 16]>> for f64x8[src]

impl FromBits<Simd<[u32; 16]>> for i128x4[src]

impl FromBits<Simd<[u32; 16]>> for i16x32[src]

impl FromBits<Simd<[u32; 16]>> for i32x16[src]

impl FromBits<Simd<[u32; 16]>> for i64x8[src]

impl FromBits<Simd<[u32; 16]>> for i8x64[src]

impl FromBits<Simd<[u32; 16]>> for u128x4[src]

impl FromBits<Simd<[u32; 16]>> for u16x32[src]

impl FromBits<Simd<[u32; 16]>> for u64x8[src]

impl FromBits<Simd<[u32; 16]>> for u8x64[src]

impl FromBits<Simd<[u32; 2]>> for f32x2[src]

impl FromBits<Simd<[u32; 2]>> for i16x4[src]

impl FromBits<Simd<[u32; 2]>> for i32x2[src]

impl FromBits<Simd<[u32; 2]>> for i8x8[src]

impl FromBits<Simd<[u32; 2]>> for u16x4[src]

impl FromBits<Simd<[u32; 2]>> for u8x8[src]

impl FromBits<Simd<[u32; 4]>> for f32x4[src]

impl FromBits<Simd<[u32; 4]>> for f64x2[src]

impl FromBits<Simd<[u32; 4]>> for i128x1[src]

impl FromBits<Simd<[u32; 4]>> for i16x8[src]

impl FromBits<Simd<[u32; 4]>> for i32x4[src]

impl FromBits<Simd<[u32; 4]>> for i64x2[src]

impl FromBits<Simd<[u32; 4]>> for i8x16[src]

impl FromBits<Simd<[u32; 4]>> for u128x1[src]

impl FromBits<Simd<[u32; 4]>> for u16x8[src]

impl FromBits<Simd<[u32; 4]>> for u64x2[src]

impl FromBits<Simd<[u32; 4]>> for u8x16[src]

impl FromBits<Simd<[u32; 8]>> for f32x8[src]

impl FromBits<Simd<[u32; 8]>> for f64x4[src]

impl FromBits<Simd<[u32; 8]>> for i128x2[src]

impl FromBits<Simd<[u32; 8]>> for i16x16[src]

impl FromBits<Simd<[u32; 8]>> for i32x8[src]

impl FromBits<Simd<[u32; 8]>> for i64x4[src]

impl FromBits<Simd<[u32; 8]>> for i8x32[src]

impl FromBits<Simd<[u32; 8]>> for u128x2[src]

impl FromBits<Simd<[u32; 8]>> for u16x16[src]

impl FromBits<Simd<[u32; 8]>> for u64x4[src]

impl FromBits<Simd<[u32; 8]>> for u8x32[src]

impl FromBits<Simd<[u64; 2]>> for f32x4[src]

impl FromBits<Simd<[u64; 2]>> for f64x2[src]

impl FromBits<Simd<[u64; 2]>> for i128x1[src]

impl FromBits<Simd<[u64; 2]>> for i16x8[src]

impl FromBits<Simd<[u64; 2]>> for i32x4[src]

impl FromBits<Simd<[u64; 2]>> for i64x2[src]

impl FromBits<Simd<[u64; 2]>> for i8x16[src]

impl FromBits<Simd<[u64; 2]>> for u128x1[src]

impl FromBits<Simd<[u64; 2]>> for u16x8[src]

impl FromBits<Simd<[u64; 2]>> for u32x4[src]

impl FromBits<Simd<[u64; 2]>> for u8x16[src]

impl FromBits<Simd<[u64; 4]>> for f32x8[src]

impl FromBits<Simd<[u64; 4]>> for f64x4[src]

impl FromBits<Simd<[u64; 4]>> for i128x2[src]

impl FromBits<Simd<[u64; 4]>> for i16x16[src]

impl FromBits<Simd<[u64; 4]>> for i32x8[src]

impl FromBits<Simd<[u64; 4]>> for i64x4[src]

impl FromBits<Simd<[u64; 4]>> for i8x32[src]

impl FromBits<Simd<[u64; 4]>> for u128x2[src]

impl FromBits<Simd<[u64; 4]>> for u16x16[src]

impl FromBits<Simd<[u64; 4]>> for u32x8[src]

impl FromBits<Simd<[u64; 4]>> for u8x32[src]

impl FromBits<Simd<[u64; 8]>> for f32x16[src]

impl FromBits<Simd<[u64; 8]>> for f64x8[src]

impl FromBits<Simd<[u64; 8]>> for i128x4[src]

impl FromBits<Simd<[u64; 8]>> for i16x32[src]

impl FromBits<Simd<[u64; 8]>> for i32x16[src]

impl FromBits<Simd<[u64; 8]>> for i64x8[src]

impl FromBits<Simd<[u64; 8]>> for i8x64[src]

impl FromBits<Simd<[u64; 8]>> for u128x4[src]

impl FromBits<Simd<[u64; 8]>> for u16x32[src]

impl FromBits<Simd<[u64; 8]>> for u32x16[src]

impl FromBits<Simd<[u64; 8]>> for u8x64[src]

impl FromBits<Simd<[u8; 16]>> for f32x4[src]

impl FromBits<Simd<[u8; 16]>> for f64x2[src]

impl FromBits<Simd<[u8; 16]>> for i128x1[src]

impl FromBits<Simd<[u8; 16]>> for i16x8[src]

impl FromBits<Simd<[u8; 16]>> for i32x4[src]

impl FromBits<Simd<[u8; 16]>> for i64x2[src]

impl FromBits<Simd<[u8; 16]>> for i8x16[src]

impl FromBits<Simd<[u8; 16]>> for u128x1[src]

impl FromBits<Simd<[u8; 16]>> for u16x8[src]

impl FromBits<Simd<[u8; 16]>> for u32x4[src]

impl FromBits<Simd<[u8; 16]>> for u64x2[src]

impl FromBits<Simd<[u8; 2]>> for i8x2[src]

impl FromBits<Simd<[u8; 32]>> for f32x8[src]

impl FromBits<Simd<[u8; 32]>> for f64x4[src]

impl FromBits<Simd<[u8; 32]>> for i128x2[src]

impl FromBits<Simd<[u8; 32]>> for i16x16[src]

impl FromBits<Simd<[u8; 32]>> for i32x8[src]

impl FromBits<Simd<[u8; 32]>> for i64x4[src]

impl FromBits<Simd<[u8; 32]>> for i8x32[src]

impl FromBits<Simd<[u8; 32]>> for u128x2[src]

impl FromBits<Simd<[u8; 32]>> for u16x16[src]

impl FromBits<Simd<[u8; 32]>> for u32x8[src]

impl FromBits<Simd<[u8; 32]>> for u64x4[src]

impl FromBits<Simd<[u8; 4]>> for i16x2[src]

impl FromBits<Simd<[u8; 4]>> for i8x4[src]

impl FromBits<Simd<[u8; 4]>> for u16x2[src]

impl FromBits<Simd<[u8; 64]>> for f32x16[src]

impl FromBits<Simd<[u8; 64]>> for f64x8[src]

impl FromBits<Simd<[u8; 64]>> for i128x4[src]

impl FromBits<Simd<[u8; 64]>> for i16x32[src]

impl FromBits<Simd<[u8; 64]>> for i32x16[src]

impl FromBits<Simd<[u8; 64]>> for i64x8[src]

impl FromBits<Simd<[u8; 64]>> for i8x64[src]

impl FromBits<Simd<[u8; 64]>> for u128x4[src]

impl FromBits<Simd<[u8; 64]>> for u16x32[src]

impl FromBits<Simd<[u8; 64]>> for u32x16[src]

impl FromBits<Simd<[u8; 64]>> for u64x8[src]

impl FromBits<Simd<[u8; 8]>> for f32x2[src]

impl FromBits<Simd<[u8; 8]>> for i16x4[src]

impl FromBits<Simd<[u8; 8]>> for i32x2[src]

impl FromBits<Simd<[u8; 8]>> for i8x8[src]

impl FromBits<Simd<[u8; 8]>> for u16x4[src]

impl FromBits<Simd<[u8; 8]>> for u32x2[src]

impl FromBits<__m128> for f32x4[src]

impl FromBits<__m128> for f64x2[src]

impl FromBits<__m128> for i128x1[src]

impl FromBits<__m128> for i16x8[src]

impl FromBits<__m128> for i32x4[src]

impl FromBits<__m128> for i64x2[src]

impl FromBits<__m128> for i8x16[src]

impl FromBits<__m128> for u128x1[src]

impl FromBits<__m128> for u16x8[src]

impl FromBits<__m128> for u32x4[src]

impl FromBits<__m128> for u64x2[src]

impl FromBits<__m128> for u8x16[src]

impl FromBits<__m128d> for f32x4[src]

impl FromBits<__m128d> for f64x2[src]

impl FromBits<__m128d> for i128x1[src]

impl FromBits<__m128d> for i16x8[src]

impl FromBits<__m128d> for i32x4[src]

impl FromBits<__m128d> for i64x2[src]

impl FromBits<__m128d> for i8x16[src]

impl FromBits<__m128d> for u128x1[src]

impl FromBits<__m128d> for u16x8[src]

impl FromBits<__m128d> for u32x4[src]

impl FromBits<__m128d> for u64x2[src]

impl FromBits<__m128d> for u8x16[src]

impl FromBits<__m128i> for f32x4[src]

impl FromBits<__m128i> for f64x2[src]

impl FromBits<__m128i> for i128x1[src]

impl FromBits<__m128i> for i16x8[src]

impl FromBits<__m128i> for i32x4[src]

impl FromBits<__m128i> for i64x2[src]

impl FromBits<__m128i> for i8x16[src]

impl FromBits<__m128i> for u128x1[src]

impl FromBits<__m128i> for u16x8[src]

impl FromBits<__m128i> for u32x4[src]

impl FromBits<__m128i> for u64x2[src]

impl FromBits<__m128i> for u8x16[src]

impl FromBits<__m256> for f32x8[src]

impl FromBits<__m256> for f64x4[src]

impl FromBits<__m256> for i128x2[src]

impl FromBits<__m256> for i16x16[src]

impl FromBits<__m256> for i32x8[src]

impl FromBits<__m256> for i64x4[src]

impl FromBits<__m256> for i8x32[src]

impl FromBits<__m256> for u128x2[src]

impl FromBits<__m256> for u16x16[src]

impl FromBits<__m256> for u32x8[src]

impl FromBits<__m256> for u64x4[src]

impl FromBits<__m256> for u8x32[src]

impl FromBits<__m256d> for f32x8[src]

impl FromBits<__m256d> for f64x4[src]

impl FromBits<__m256d> for i128x2[src]

impl FromBits<__m256d> for i16x16[src]

impl FromBits<__m256d> for i32x8[src]

impl FromBits<__m256d> for i64x4[src]

impl FromBits<__m256d> for i8x32[src]

impl FromBits<__m256d> for u128x2[src]

impl FromBits<__m256d> for u16x16[src]

impl FromBits<__m256d> for u32x8[src]

impl FromBits<__m256d> for u64x4[src]

impl FromBits<__m256d> for u8x32[src]

impl FromBits<__m256i> for f32x8[src]

impl FromBits<__m256i> for f64x4[src]

impl FromBits<__m256i> for i128x2[src]

impl FromBits<__m256i> for i16x16[src]

impl FromBits<__m256i> for i32x8[src]

impl FromBits<__m256i> for i64x4[src]

impl FromBits<__m256i> for i8x32[src]

impl FromBits<__m256i> for u128x2[src]

impl FromBits<__m256i> for u16x16[src]

impl FromBits<__m256i> for u32x8[src]

impl FromBits<__m256i> for u64x4[src]

impl FromBits<__m256i> for u8x32[src]

impl FromBits<__m64> for f32x2[src]

impl FromBits<__m64> for i16x4[src]

impl FromBits<__m64> for i32x2[src]

impl FromBits<__m64> for i8x8[src]

impl FromBits<__m64> for u16x4[src]

impl FromBits<__m64> for u32x2[src]

impl FromBits<__m64> for u8x8[src]

impl<T> FromBits<T> for T[src]

FromBits and IntoBits are reflexive.
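A minimal sketch of what these impls provide (assuming the packed_simd crate as a dependency): from_bits reinterprets the raw bits of one vector as another vector type of the same total size, and the reflexive impl above makes a same-type conversion a no-op.

use packed_simd::{f32x4, u32x4, FromBits};

fn main() {
    // 0x3f80_0000 is the IEEE-754 bit pattern of 1.0f32.
    let x = u32x4::new(0x3f80_0000, 0, 0, 0);
    // Reinterpret the same 128 bits as four f32 lanes.
    let y = f32x4::from_bits(x);
    assert_eq!(y.extract(0), 1.0);
    // The reflexive impl converts a type to itself unchanged.
    assert_eq!(u32x4::from_bits(x), x);
}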

\ No newline at end of file diff --git a/packed_simd/trait.FromCast.html b/packed_simd/trait.FromCast.html new file mode 100644 index 000000000..3aef9394a --- /dev/null +++ b/packed_simd/trait.FromCast.html @@ -0,0 +1,44 @@ +packed_simd::FromCast - Rust

Trait packed_simd::FromCast

pub trait FromCast<T>: Sized {
    fn from_cast(_: T) -> Self;
}

Numeric cast from T to Self.


Note: This is a temporary workaround until the conversion traits specified in RFC2484 are implemented.


Numeric cast between vectors with the same number of lanes, such that:

  • casting integer vectors whose lane types have the same size (e.g. i32xN -> u32xN) is a no-op,

  • casting from a larger integer to a smaller integer (e.g. u32xN -> u8xN) will truncate,

  • casting from a smaller integer to a larger integer (e.g. u8xN -> u32xN) will:

      • zero-extend if the source is unsigned, or
      • sign-extend if the source is signed,

  • casting from a float to an integer will round the float towards zero,

  • casting from an integer to float will produce the floating point representation of the integer, rounding to nearest, ties to even,

  • casting from an f32 to an f64 is perfect and lossless,

  • casting from an f64 to an f32 rounds to nearest, ties to even.
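A minimal sketch of these semantics (assuming the packed_simd crate as a dependency; the lane values below follow the rules listed above):

use packed_simd::{f32x4, i32x4, u32x4, u8x4, FromCast};

fn main() {
    // Same-size integer cast: a per-lane no-op on the bits.
    let a = i32x4::new(-1, 0, 1, 2);
    assert_eq!(u32x4::from_cast(a).extract(0), u32::MAX);

    // Larger -> smaller integer: truncates each lane (0x0102 -> 0x02).
    let b = u32x4::new(0x0102, 0, 0, 0);
    assert_eq!(u8x4::from_cast(b).extract(0), 0x02);

    // Float -> integer: rounds each lane towards zero.
    let f = f32x4::new(1.9, -1.9, 0.5, -0.5);
    assert_eq!(i32x4::from_cast(f), i32x4::new(1, -1, 0, 0));
}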

Required methods

fn from_cast(_: T) -> Self

Numeric cast from T to Self.


Implementors

impl FromCast<Simd<[m128; 1]>> for i128x1[src]

impl FromCast<Simd<[m128; 1]>> for u128x1[src]

impl FromCast<Simd<[m128; 2]>> for f32x2[src]

impl FromCast<Simd<[m128; 2]>> for f64x2[src]

impl FromCast<Simd<[m128; 2]>> for i128x2[src]

impl FromCast<Simd<[m128; 2]>> for i16x2[src]

impl FromCast<Simd<[m128; 2]>> for i32x2[src]

impl FromCast<Simd<[m128; 2]>> for i64x2[src]

impl FromCast<Simd<[m128; 2]>> for i8x2[src]

impl FromCast<Simd<[m128; 2]>> for isizex2[src]

impl FromCast<Simd<[m128; 2]>> for m16x2[src]

impl FromCast<Simd<[m128; 2]>> for m32x2[src]

impl FromCast<Simd<[m128; 2]>> for m64x2[src]

impl FromCast<Simd<[m128; 2]>> for m8x2[src]

impl FromCast<Simd<[m128; 2]>> for msizex2[src]

impl FromCast<Simd<[m128; 2]>> for u128x2[src]

impl FromCast<Simd<[m128; 2]>> for u16x2[src]

impl FromCast<Simd<[m128; 2]>> for u32x2[src]

impl FromCast<Simd<[m128; 2]>> for u64x2[src]

impl FromCast<Simd<[m128; 2]>> for u8x2[src]

impl FromCast<Simd<[m128; 2]>> for usizex2[src]

impl FromCast<Simd<[m128; 4]>> for f32x4[src]

impl FromCast<Simd<[m128; 4]>> for f64x4[src]

impl FromCast<Simd<[m128; 4]>> for i128x4[src]

impl FromCast<Simd<[m128; 4]>> for i16x4[src]

impl FromCast<Simd<[m128; 4]>> for i32x4[src]

impl FromCast<Simd<[m128; 4]>> for i64x4[src]

impl FromCast<Simd<[m128; 4]>> for i8x4[src]

impl FromCast<Simd<[m128; 4]>> for isizex4[src]

impl FromCast<Simd<[m128; 4]>> for m16x4[src]

impl FromCast<Simd<[m128; 4]>> for m32x4[src]

impl FromCast<Simd<[m128; 4]>> for m64x4[src]

impl FromCast<Simd<[m128; 4]>> for m8x4[src]

impl FromCast<Simd<[m128; 4]>> for msizex4[src]

impl FromCast<Simd<[m128; 4]>> for u128x4[src]

impl FromCast<Simd<[m128; 4]>> for u16x4[src]

impl FromCast<Simd<[m128; 4]>> for u32x4[src]

impl FromCast<Simd<[m128; 4]>> for u64x4[src]

impl FromCast<Simd<[m128; 4]>> for u8x4[src]

impl FromCast<Simd<[m128; 4]>> for usizex4[src]

impl FromCast<Simd<[m16; 16]>> for f32x16[src]

impl FromCast<Simd<[m16; 16]>> for i16x16[src]

impl FromCast<Simd<[m16; 16]>> for i32x16[src]

impl FromCast<Simd<[m16; 16]>> for i8x16[src]

impl FromCast<Simd<[m16; 16]>> for m32x16[src]

impl FromCast<Simd<[m16; 16]>> for m8x16[src]

impl FromCast<Simd<[m16; 16]>> for u16x16[src]

impl FromCast<Simd<[m16; 16]>> for u32x16[src]

impl FromCast<Simd<[m16; 16]>> for u8x16[src]

impl FromCast<Simd<[m16; 2]>> for f32x2[src]

impl FromCast<Simd<[m16; 2]>> for f64x2[src]

impl FromCast<Simd<[m16; 2]>> for i128x2[src]

impl FromCast<Simd<[m16; 2]>> for i16x2[src]

impl FromCast<Simd<[m16; 2]>> for i32x2[src]

impl FromCast<Simd<[m16; 2]>> for i64x2[src]

impl FromCast<Simd<[m16; 2]>> for i8x2[src]

impl FromCast<Simd<[m16; 2]>> for isizex2[src]

impl FromCast<Simd<[m16; 2]>> for m128x2[src]

impl FromCast<Simd<[m16; 2]>> for m32x2[src]

impl FromCast<Simd<[m16; 2]>> for m64x2[src]

impl FromCast<Simd<[m16; 2]>> for m8x2[src]

impl FromCast<Simd<[m16; 2]>> for msizex2[src]

impl FromCast<Simd<[m16; 2]>> for u128x2[src]

impl FromCast<Simd<[m16; 2]>> for u16x2[src]

impl FromCast<Simd<[m16; 2]>> for u32x2[src]

impl FromCast<Simd<[m16; 2]>> for u64x2[src]

impl FromCast<Simd<[m16; 2]>> for u8x2[src]

impl FromCast<Simd<[m16; 2]>> for usizex2[src]

impl FromCast<Simd<[m16; 32]>> for i16x32[src]

impl FromCast<Simd<[m16; 32]>> for i8x32[src]

impl FromCast<Simd<[m16; 32]>> for m8x32[src]

impl FromCast<Simd<[m16; 32]>> for u16x32[src]

impl FromCast<Simd<[m16; 32]>> for u8x32[src]

impl FromCast<Simd<[m16; 4]>> for f32x4[src]

impl FromCast<Simd<[m16; 4]>> for f64x4[src]

impl FromCast<Simd<[m16; 4]>> for i128x4[src]

impl FromCast<Simd<[m16; 4]>> for i16x4[src]

impl FromCast<Simd<[m16; 4]>> for i32x4[src]

impl FromCast<Simd<[m16; 4]>> for i64x4[src]

impl FromCast<Simd<[m16; 4]>> for i8x4[src]

impl FromCast<Simd<[m16; 4]>> for isizex4[src]

impl FromCast<Simd<[m16; 4]>> for m128x4[src]

impl FromCast<Simd<[m16; 4]>> for m32x4[src]

impl FromCast<Simd<[m16; 4]>> for m64x4[src]

impl FromCast<Simd<[m16; 4]>> for m8x4[src]

impl FromCast<Simd<[m16; 4]>> for msizex4[src]

impl FromCast<Simd<[m16; 4]>> for u128x4[src]

impl FromCast<Simd<[m16; 4]>> for u16x4[src]

impl FromCast<Simd<[m16; 4]>> for u32x4[src]

impl FromCast<Simd<[m16; 4]>> for u64x4[src]

impl FromCast<Simd<[m16; 4]>> for u8x4[src]

impl FromCast<Simd<[m16; 4]>> for usizex4[src]

impl FromCast<Simd<[m16; 8]>> for f32x8[src]

impl FromCast<Simd<[m16; 8]>> for f64x8[src]

impl FromCast<Simd<[m16; 8]>> for i16x8[src]

impl FromCast<Simd<[m16; 8]>> for i32x8[src]

impl FromCast<Simd<[m16; 8]>> for i64x8[src]

impl FromCast<Simd<[m16; 8]>> for i8x8[src]

impl FromCast<Simd<[m16; 8]>> for isizex8[src]

impl FromCast<Simd<[m16; 8]>> for m32x8[src]

impl FromCast<Simd<[m16; 8]>> for m64x8[src]

impl FromCast<Simd<[m16; 8]>> for m8x8[src]

impl FromCast<Simd<[m16; 8]>> for msizex8[src]

impl FromCast<Simd<[m16; 8]>> for u16x8[src]

impl FromCast<Simd<[m16; 8]>> for u32x8[src]

impl FromCast<Simd<[m16; 8]>> for u64x8[src]

impl FromCast<Simd<[m16; 8]>> for u8x8[src]

impl FromCast<Simd<[m16; 8]>> for usizex8[src]

impl FromCast<Simd<[m32; 16]>> for f32x16[src]

impl FromCast<Simd<[m32; 16]>> for i16x16[src]

impl FromCast<Simd<[m32; 16]>> for i32x16[src]

impl FromCast<Simd<[m32; 16]>> for i8x16[src]

impl FromCast<Simd<[m32; 16]>> for m16x16[src]

impl FromCast<Simd<[m32; 16]>> for m8x16[src]

impl FromCast<Simd<[m32; 16]>> for u16x16[src]

impl FromCast<Simd<[m32; 16]>> for u32x16[src]

impl FromCast<Simd<[m32; 16]>> for u8x16[src]

impl FromCast<Simd<[m32; 2]>> for f32x2[src]

impl FromCast<Simd<[m32; 2]>> for f64x2[src]

impl FromCast<Simd<[m32; 2]>> for i128x2[src]

impl FromCast<Simd<[m32; 2]>> for i16x2[src]

impl FromCast<Simd<[m32; 2]>> for i32x2[src]

impl FromCast<Simd<[m32; 2]>> for i64x2[src]

impl FromCast<Simd<[m32; 2]>> for i8x2[src]

impl FromCast<Simd<[m32; 2]>> for isizex2[src]

impl FromCast<Simd<[m32; 2]>> for m128x2[src]

impl FromCast<Simd<[m32; 2]>> for m16x2[src]

impl FromCast<Simd<[m32; 2]>> for m64x2[src]

impl FromCast<Simd<[m32; 2]>> for m8x2[src]

impl FromCast<Simd<[m32; 2]>> for msizex2[src]

impl FromCast<Simd<[m32; 2]>> for u128x2[src]

impl FromCast<Simd<[m32; 2]>> for u16x2[src]

impl FromCast<Simd<[m32; 2]>> for u32x2[src]

impl FromCast<Simd<[m32; 2]>> for u64x2[src]

impl FromCast<Simd<[m32; 2]>> for u8x2[src]

impl FromCast<Simd<[m32; 2]>> for usizex2[src]

impl FromCast<Simd<[m32; 4]>> for f32x4[src]

impl FromCast<Simd<[m32; 4]>> for f64x4[src]

impl FromCast<Simd<[m32; 4]>> for i128x4[src]

impl FromCast<Simd<[m32; 4]>> for i16x4[src]

impl FromCast<Simd<[m32; 4]>> for i32x4[src]

impl FromCast<Simd<[m32; 4]>> for i64x4[src]

impl FromCast<Simd<[m32; 4]>> for i8x4[src]

impl FromCast<Simd<[m32; 4]>> for isizex4[src]

impl FromCast<Simd<[m32; 4]>> for m128x4[src]

impl FromCast<Simd<[m32; 4]>> for m16x4[src]

impl FromCast<Simd<[m32; 4]>> for m64x4[src]

impl FromCast<Simd<[m32; 4]>> for m8x4[src]

impl FromCast<Simd<[m32; 4]>> for msizex4[src]

impl FromCast<Simd<[m32; 4]>> for u128x4[src]

impl FromCast<Simd<[m32; 4]>> for u16x4[src]

impl FromCast<Simd<[m32; 4]>> for u32x4[src]

impl FromCast<Simd<[m32; 4]>> for u64x4[src]

impl FromCast<Simd<[m32; 4]>> for u8x4[src]

impl FromCast<Simd<[m32; 4]>> for usizex4[src]

impl FromCast<Simd<[m32; 8]>> for f32x8[src]

impl FromCast<Simd<[m32; 8]>> for f64x8[src]

impl FromCast<Simd<[m32; 8]>> for i16x8[src]

impl FromCast<Simd<[m32; 8]>> for i32x8[src]

impl FromCast<Simd<[m32; 8]>> for i64x8[src]

impl FromCast<Simd<[m32; 8]>> for i8x8[src]

impl FromCast<Simd<[m32; 8]>> for isizex8[src]

impl FromCast<Simd<[m32; 8]>> for m16x8[src]

impl FromCast<Simd<[m32; 8]>> for m64x8[src]

impl FromCast<Simd<[m32; 8]>> for m8x8[src]

impl FromCast<Simd<[m32; 8]>> for msizex8[src]

impl FromCast<Simd<[m32; 8]>> for u16x8[src]

impl FromCast<Simd<[m32; 8]>> for u32x8[src]

impl FromCast<Simd<[m32; 8]>> for u64x8[src]

impl FromCast<Simd<[m32; 8]>> for u8x8[src]

impl FromCast<Simd<[m32; 8]>> for usizex8[src]

impl FromCast<Simd<[m64; 2]>> for f32x2[src]

impl FromCast<Simd<[m64; 2]>> for f64x2[src]

impl FromCast<Simd<[m64; 2]>> for i128x2[src]

impl FromCast<Simd<[m64; 2]>> for i16x2[src]

impl FromCast<Simd<[m64; 2]>> for i32x2[src]

impl FromCast<Simd<[m64; 2]>> for i64x2[src]

impl FromCast<Simd<[m64; 2]>> for i8x2[src]

impl FromCast<Simd<[m64; 2]>> for isizex2[src]

impl FromCast<Simd<[m64; 2]>> for m128x2[src]

impl FromCast<Simd<[m64; 2]>> for m16x2[src]

impl FromCast<Simd<[m64; 2]>> for m32x2[src]

impl FromCast<Simd<[m64; 2]>> for m8x2[src]

impl FromCast<Simd<[m64; 2]>> for msizex2[src]

impl FromCast<Simd<[m64; 2]>> for u128x2[src]

impl FromCast<Simd<[m64; 2]>> for u16x2[src]

impl FromCast<Simd<[m64; 2]>> for u32x2[src]

impl FromCast<Simd<[m64; 2]>> for u64x2[src]

impl FromCast<Simd<[m64; 2]>> for u8x2[src]

impl FromCast<Simd<[m64; 2]>> for usizex2[src]

impl FromCast<Simd<[m64; 4]>> for f32x4[src]

impl FromCast<Simd<[m64; 4]>> for f64x4[src]

impl FromCast<Simd<[m64; 4]>> for i128x4[src]

impl FromCast<Simd<[m64; 4]>> for i16x4[src]

impl FromCast<Simd<[m64; 4]>> for i32x4[src]

impl FromCast<Simd<[m64; 4]>> for i64x4[src]

impl FromCast<Simd<[m64; 4]>> for i8x4[src]

impl FromCast<Simd<[m64; 4]>> for isizex4[src]

impl FromCast<Simd<[m64; 4]>> for m128x4[src]

impl FromCast<Simd<[m64; 4]>> for m16x4[src]

impl FromCast<Simd<[m64; 4]>> for m32x4[src]

impl FromCast<Simd<[m64; 4]>> for m8x4[src]

impl FromCast<Simd<[m64; 4]>> for msizex4[src]

impl FromCast<Simd<[m64; 4]>> for u128x4[src]

impl FromCast<Simd<[m64; 4]>> for u16x4[src]

impl FromCast<Simd<[m64; 4]>> for u32x4[src]

impl FromCast<Simd<[m64; 4]>> for u64x4[src]

impl FromCast<Simd<[m64; 4]>> for u8x4[src]

impl FromCast<Simd<[m64; 4]>> for usizex4[src]

impl FromCast<Simd<[m64; 8]>> for f32x8[src]

impl FromCast<Simd<[m64; 8]>> for f64x8[src]

impl FromCast<Simd<[m64; 8]>> for i16x8[src]

impl FromCast<Simd<[m64; 8]>> for i32x8[src]

impl FromCast<Simd<[m64; 8]>> for i64x8[src]

impl FromCast<Simd<[m64; 8]>> for i8x8[src]

impl FromCast<Simd<[m64; 8]>> for isizex8[src]

impl FromCast<Simd<[m64; 8]>> for m16x8[src]

impl FromCast<Simd<[m64; 8]>> for m32x8[src]

impl FromCast<Simd<[m64; 8]>> for m8x8[src]

impl FromCast<Simd<[m64; 8]>> for msizex8[src]

impl FromCast<Simd<[m64; 8]>> for u16x8[src]

impl FromCast<Simd<[m64; 8]>> for u32x8[src]

impl FromCast<Simd<[m64; 8]>> for u64x8[src]

impl FromCast<Simd<[m64; 8]>> for u8x8[src]

impl FromCast<Simd<[m64; 8]>> for usizex8[src]

impl FromCast<Simd<[m8; 16]>> for f32x16[src]

impl FromCast<Simd<[m8; 16]>> for i16x16[src]

impl FromCast<Simd<[m8; 16]>> for i32x16[src]

impl FromCast<Simd<[m8; 16]>> for i8x16[src]

impl FromCast<Simd<[m8; 16]>> for m16x16[src]

impl FromCast<Simd<[m8; 16]>> for m32x16[src]

impl FromCast<Simd<[m8; 16]>> for u16x16[src]

impl FromCast<Simd<[m8; 16]>> for u32x16[src]

impl FromCast<Simd<[m8; 16]>> for u8x16[src]

impl FromCast<Simd<[m8; 2]>> for f32x2[src]

impl FromCast<Simd<[m8; 2]>> for f64x2[src]

impl FromCast<Simd<[m8; 2]>> for i128x2[src]

impl FromCast<Simd<[m8; 2]>> for i16x2[src]

impl FromCast<Simd<[m8; 2]>> for i32x2[src]

impl FromCast<Simd<[m8; 2]>> for i64x2[src]

impl FromCast<Simd<[m8; 2]>> for i8x2[src]

impl FromCast<Simd<[m8; 2]>> for isizex2[src]

impl FromCast<Simd<[m8; 2]>> for m128x2[src]

impl FromCast<Simd<[m8; 2]>> for m16x2[src]

impl FromCast<Simd<[m8; 2]>> for m32x2[src]

impl FromCast<Simd<[m8; 2]>> for m64x2[src]

impl FromCast<Simd<[m8; 2]>> for msizex2[src]

impl FromCast<Simd<[m8; 2]>> for u128x2[src]

impl FromCast<Simd<[m8; 2]>> for u16x2[src]

impl FromCast<Simd<[m8; 2]>> for u32x2[src]

impl FromCast<Simd<[m8; 2]>> for u64x2[src]

impl FromCast<Simd<[m8; 2]>> for u8x2[src]

impl FromCast<Simd<[m8; 2]>> for usizex2[src]

impl FromCast<Simd<[m8; 32]>> for i16x32[src]

impl FromCast<Simd<[m8; 32]>> for i8x32[src]

impl FromCast<Simd<[m8; 32]>> for m16x32[src]

impl FromCast<Simd<[m8; 32]>> for u16x32[src]

impl FromCast<Simd<[m8; 32]>> for u8x32[src]

impl FromCast<Simd<[m8; 4]>> for f32x4[src]

impl FromCast<Simd<[m8; 4]>> for f64x4[src]

impl FromCast<Simd<[m8; 4]>> for i128x4[src]

impl FromCast<Simd<[m8; 4]>> for i16x4[src]

impl FromCast<Simd<[m8; 4]>> for i32x4[src]

impl FromCast<Simd<[m8; 4]>> for i64x4[src]

impl FromCast<Simd<[m8; 4]>> for i8x4[src]

impl FromCast<Simd<[m8; 4]>> for isizex4[src]

impl FromCast<Simd<[m8; 4]>> for m128x4[src]

impl FromCast<Simd<[m8; 4]>> for m16x4[src]

impl FromCast<Simd<[m8; 4]>> for m32x4[src]

impl FromCast<Simd<[m8; 4]>> for m64x4[src]

impl FromCast<Simd<[m8; 4]>> for msizex4[src]

impl FromCast<Simd<[m8; 4]>> for u128x4[src]

impl FromCast<Simd<[m8; 4]>> for u16x4[src]

impl FromCast<Simd<[m8; 4]>> for u32x4[src]

impl FromCast<Simd<[m8; 4]>> for u64x4[src]

impl FromCast<Simd<[m8; 4]>> for u8x4[src]

impl FromCast<Simd<[m8; 4]>> for usizex4[src]

impl FromCast<Simd<[m8; 64]>> for i8x64[src]

impl FromCast<Simd<[m8; 64]>> for u8x64[src]

impl FromCast<Simd<[m8; 8]>> for f32x8[src]

impl FromCast<Simd<[m8; 8]>> for f64x8[src]

impl FromCast<Simd<[m8; 8]>> for i16x8[src]

impl FromCast<Simd<[m8; 8]>> for i32x8[src]

impl FromCast<Simd<[m8; 8]>> for i64x8[src]

impl FromCast<Simd<[m8; 8]>> for i8x8[src]

impl FromCast<Simd<[m8; 8]>> for isizex8[src]

impl FromCast<Simd<[m8; 8]>> for m16x8[src]

impl FromCast<Simd<[m8; 8]>> for m32x8[src]

impl FromCast<Simd<[m8; 8]>> for m64x8[src]

impl FromCast<Simd<[m8; 8]>> for msizex8[src]

impl FromCast<Simd<[m8; 8]>> for u16x8[src]

impl FromCast<Simd<[m8; 8]>> for u32x8[src]

impl FromCast<Simd<[m8; 8]>> for u64x8[src]

impl FromCast<Simd<[m8; 8]>> for u8x8[src]

impl FromCast<Simd<[m8; 8]>> for usizex8[src]

impl FromCast<Simd<[msize; 2]>> for f32x2[src]

impl FromCast<Simd<[msize; 2]>> for f64x2[src]

impl FromCast<Simd<[msize; 2]>> for i128x2[src]

impl FromCast<Simd<[msize; 2]>> for i16x2[src]

impl FromCast<Simd<[msize; 2]>> for i32x2[src]

impl FromCast<Simd<[msize; 2]>> for i64x2[src]

impl FromCast<Simd<[msize; 2]>> for i8x2[src]

impl FromCast<Simd<[msize; 2]>> for isizex2[src]

impl FromCast<Simd<[msize; 2]>> for m128x2[src]

impl FromCast<Simd<[msize; 2]>> for m16x2[src]

impl FromCast<Simd<[msize; 2]>> for m32x2[src]

impl FromCast<Simd<[msize; 2]>> for m64x2[src]

impl FromCast<Simd<[msize; 2]>> for m8x2[src]

impl FromCast<Simd<[msize; 2]>> for u128x2[src]

impl FromCast<Simd<[msize; 2]>> for u16x2[src]

impl FromCast<Simd<[msize; 2]>> for u32x2[src]

impl FromCast<Simd<[msize; 2]>> for u64x2[src]

impl FromCast<Simd<[msize; 2]>> for u8x2[src]

impl FromCast<Simd<[msize; 2]>> for usizex2[src]

impl FromCast<Simd<[msize; 4]>> for f32x4[src]

impl FromCast<Simd<[msize; 4]>> for f64x4[src]

impl FromCast<Simd<[msize; 4]>> for i128x4[src]

impl FromCast<Simd<[msize; 4]>> for i16x4[src]

impl FromCast<Simd<[msize; 4]>> for i32x4[src]

impl FromCast<Simd<[msize; 4]>> for i64x4[src]

impl FromCast<Simd<[msize; 4]>> for i8x4[src]

impl FromCast<Simd<[msize; 4]>> for isizex4[src]

impl FromCast<Simd<[msize; 4]>> for m128x4[src]

impl FromCast<Simd<[msize; 4]>> for m16x4[src]

impl FromCast<Simd<[msize; 4]>> for m32x4[src]

impl FromCast<Simd<[msize; 4]>> for m64x4[src]

impl FromCast<Simd<[msize; 4]>> for m8x4[src]

impl FromCast<Simd<[msize; 4]>> for u128x4[src]

impl FromCast<Simd<[msize; 4]>> for u16x4[src]

impl FromCast<Simd<[msize; 4]>> for u32x4[src]

impl FromCast<Simd<[msize; 4]>> for u64x4[src]

impl FromCast<Simd<[msize; 4]>> for u8x4[src]

impl FromCast<Simd<[msize; 4]>> for usizex4[src]

impl FromCast<Simd<[msize; 8]>> for f32x8[src]

impl FromCast<Simd<[msize; 8]>> for f64x8[src]

impl FromCast<Simd<[msize; 8]>> for i16x8[src]

impl FromCast<Simd<[msize; 8]>> for i32x8[src]

impl FromCast<Simd<[msize; 8]>> for i64x8[src]

impl FromCast<Simd<[msize; 8]>> for i8x8[src]

impl FromCast<Simd<[msize; 8]>> for isizex8[src]

impl FromCast<Simd<[msize; 8]>> for m16x8[src]

impl FromCast<Simd<[msize; 8]>> for m32x8[src]

impl FromCast<Simd<[msize; 8]>> for m64x8[src]

impl FromCast<Simd<[msize; 8]>> for m8x8[src]

impl FromCast<Simd<[msize; 8]>> for u16x8[src]

impl FromCast<Simd<[msize; 8]>> for u32x8[src]

impl FromCast<Simd<[msize; 8]>> for u64x8[src]

impl FromCast<Simd<[msize; 8]>> for u8x8[src]

impl FromCast<Simd<[msize; 8]>> for usizex8[src]

impl FromCast<Simd<[f32; 16]>> for i16x16[src]

impl FromCast<Simd<[f32; 16]>> for i32x16[src]

impl FromCast<Simd<[f32; 16]>> for i8x16[src]

impl FromCast<Simd<[f32; 16]>> for m16x16[src]

impl FromCast<Simd<[f32; 16]>> for m32x16[src]

impl FromCast<Simd<[f32; 16]>> for m8x16[src]

impl FromCast<Simd<[f32; 16]>> for u16x16[src]

impl FromCast<Simd<[f32; 16]>> for u32x16[src]

impl FromCast<Simd<[f32; 16]>> for u8x16[src]

impl FromCast<Simd<[f32; 2]>> for f64x2[src]

impl FromCast<Simd<[f32; 2]>> for i128x2[src]

impl FromCast<Simd<[f32; 2]>> for i16x2[src]

impl FromCast<Simd<[f32; 2]>> for i32x2[src]

impl FromCast<Simd<[f32; 2]>> for i64x2[src]

impl FromCast<Simd<[f32; 2]>> for i8x2[src]

impl FromCast<Simd<[f32; 2]>> for isizex2[src]

impl FromCast<Simd<[f32; 2]>> for m128x2[src]

impl FromCast<Simd<[f32; 2]>> for m16x2[src]

impl FromCast<Simd<[f32; 2]>> for m32x2[src]

impl FromCast<Simd<[f32; 2]>> for m64x2[src]

impl FromCast<Simd<[f32; 2]>> for m8x2[src]

impl FromCast<Simd<[f32; 2]>> for msizex2[src]

impl FromCast<Simd<[f32; 2]>> for u128x2[src]

impl FromCast<Simd<[f32; 2]>> for u16x2[src]

impl FromCast<Simd<[f32; 2]>> for u32x2[src]

impl FromCast<Simd<[f32; 2]>> for u64x2[src]

impl FromCast<Simd<[f32; 2]>> for u8x2[src]

impl FromCast<Simd<[f32; 2]>> for usizex2[src]

impl FromCast<Simd<[f32; 4]>> for f64x4[src]

impl FromCast<Simd<[f32; 4]>> for i128x4[src]

impl FromCast<Simd<[f32; 4]>> for i16x4[src]

impl FromCast<Simd<[f32; 4]>> for i32x4[src]

impl FromCast<Simd<[f32; 4]>> for i64x4[src]

impl FromCast<Simd<[f32; 4]>> for i8x4[src]

impl FromCast<Simd<[f32; 4]>> for isizex4[src]

impl FromCast<Simd<[f32; 4]>> for m128x4[src]

impl FromCast<Simd<[f32; 4]>> for m16x4[src]

impl FromCast<Simd<[f32; 4]>> for m32x4[src]

impl FromCast<Simd<[f32; 4]>> for m64x4[src]

impl FromCast<Simd<[f32; 4]>> for m8x4[src]

impl FromCast<Simd<[f32; 4]>> for msizex4[src]

impl FromCast<Simd<[f32; 4]>> for u128x4[src]

impl FromCast<Simd<[f32; 4]>> for u16x4[src]

impl FromCast<Simd<[f32; 4]>> for u32x4[src]

impl FromCast<Simd<[f32; 4]>> for u64x4[src]

impl FromCast<Simd<[f32; 4]>> for u8x4[src]

impl FromCast<Simd<[f32; 4]>> for usizex4[src]

impl FromCast<Simd<[f32; 8]>> for f64x8[src]

impl FromCast<Simd<[f32; 8]>> for i16x8[src]

impl FromCast<Simd<[f32; 8]>> for i32x8[src]

impl FromCast<Simd<[f32; 8]>> for i64x8[src]

impl FromCast<Simd<[f32; 8]>> for i8x8[src]

impl FromCast<Simd<[f32; 8]>> for isizex8[src]

impl FromCast<Simd<[f32; 8]>> for m16x8[src]

impl FromCast<Simd<[f32; 8]>> for m32x8[src]

impl FromCast<Simd<[f32; 8]>> for m64x8[src]

impl FromCast<Simd<[f32; 8]>> for m8x8[src]

impl FromCast<Simd<[f32; 8]>> for msizex8[src]

impl FromCast<Simd<[f32; 8]>> for u16x8[src]

impl FromCast<Simd<[f32; 8]>> for u32x8[src]

impl FromCast<Simd<[f32; 8]>> for u64x8[src]

impl FromCast<Simd<[f32; 8]>> for u8x8[src]

impl FromCast<Simd<[f32; 8]>> for usizex8[src]

impl FromCast<Simd<[f64; 2]>> for f32x2[src]

impl FromCast<Simd<[f64; 2]>> for i128x2[src]

impl FromCast<Simd<[f64; 2]>> for i16x2[src]

impl FromCast<Simd<[f64; 2]>> for i32x2[src]

impl FromCast<Simd<[f64; 2]>> for i64x2[src]

impl FromCast<Simd<[f64; 2]>> for i8x2[src]

impl FromCast<Simd<[f64; 2]>> for isizex2[src]

impl FromCast<Simd<[f64; 2]>> for m128x2[src]

impl FromCast<Simd<[f64; 2]>> for m16x2[src]

impl FromCast<Simd<[f64; 2]>> for m32x2[src]

impl FromCast<Simd<[f64; 2]>> for m64x2[src]

impl FromCast<Simd<[f64; 2]>> for m8x2[src]

impl FromCast<Simd<[f64; 2]>> for msizex2[src]

impl FromCast<Simd<[f64; 2]>> for u128x2[src]

impl FromCast<Simd<[f64; 2]>> for u16x2[src]

impl FromCast<Simd<[f64; 2]>> for u32x2[src]

impl FromCast<Simd<[f64; 2]>> for u64x2[src]

impl FromCast<Simd<[f64; 2]>> for u8x2[src]

impl FromCast<Simd<[f64; 2]>> for usizex2[src]

impl FromCast<Simd<[f64; 4]>> for f32x4[src]

impl FromCast<Simd<[f64; 4]>> for i128x4[src]

impl FromCast<Simd<[f64; 4]>> for i16x4[src]

impl FromCast<Simd<[f64; 4]>> for i32x4[src]

impl FromCast<Simd<[f64; 4]>> for i64x4[src]

impl FromCast<Simd<[f64; 4]>> for i8x4[src]

impl FromCast<Simd<[f64; 4]>> for isizex4[src]

impl FromCast<Simd<[f64; 4]>> for m128x4[src]

impl FromCast<Simd<[f64; 4]>> for m16x4[src]

impl FromCast<Simd<[f64; 4]>> for m32x4[src]

impl FromCast<Simd<[f64; 4]>> for m64x4[src]

impl FromCast<Simd<[f64; 4]>> for m8x4[src]

impl FromCast<Simd<[f64; 4]>> for msizex4[src]

impl FromCast<Simd<[f64; 4]>> for u128x4[src]

impl FromCast<Simd<[f64; 4]>> for u16x4[src]

impl FromCast<Simd<[f64; 4]>> for u32x4[src]

impl FromCast<Simd<[f64; 4]>> for u64x4[src]

impl FromCast<Simd<[f64; 4]>> for u8x4[src]

impl FromCast<Simd<[f64; 4]>> for usizex4[src]

impl FromCast<Simd<[f64; 8]>> for f32x8[src]

impl FromCast<Simd<[f64; 8]>> for i16x8[src]

impl FromCast<Simd<[f64; 8]>> for i32x8[src]

impl FromCast<Simd<[f64; 8]>> for i64x8[src]

impl FromCast<Simd<[f64; 8]>> for i8x8[src]

impl FromCast<Simd<[f64; 8]>> for isizex8[src]

impl FromCast<Simd<[f64; 8]>> for m16x8[src]

impl FromCast<Simd<[f64; 8]>> for m32x8[src]

impl FromCast<Simd<[f64; 8]>> for m64x8[src]

impl FromCast<Simd<[f64; 8]>> for m8x8[src]

impl FromCast<Simd<[f64; 8]>> for msizex8[src]

impl FromCast<Simd<[f64; 8]>> for u16x8[src]

impl FromCast<Simd<[f64; 8]>> for u32x8[src]

impl FromCast<Simd<[f64; 8]>> for u64x8[src]

impl FromCast<Simd<[f64; 8]>> for u8x8[src]

impl FromCast<Simd<[f64; 8]>> for usizex8[src]

impl FromCast<Simd<[i128; 1]>> for m128x1[src]

impl FromCast<Simd<[i128; 1]>> for u128x1[src]

impl FromCast<Simd<[i128; 2]>> for f32x2[src]

impl FromCast<Simd<[i128; 2]>> for f64x2[src]

impl FromCast<Simd<[i128; 2]>> for i16x2[src]

impl FromCast<Simd<[i128; 2]>> for i32x2[src]

impl FromCast<Simd<[i128; 2]>> for i64x2[src]

impl FromCast<Simd<[i128; 2]>> for i8x2[src]

impl FromCast<Simd<[i128; 2]>> for isizex2[src]

impl FromCast<Simd<[i128; 2]>> for m128x2[src]

impl FromCast<Simd<[i128; 2]>> for m16x2[src]

impl FromCast<Simd<[i128; 2]>> for m32x2[src]

impl FromCast<Simd<[i128; 2]>> for m64x2[src]

impl FromCast<Simd<[i128; 2]>> for m8x2[src]

impl FromCast<Simd<[i128; 2]>> for msizex2[src]

impl FromCast<Simd<[i128; 2]>> for u128x2[src]

impl FromCast<Simd<[i128; 2]>> for u16x2[src]

impl FromCast<Simd<[i128; 2]>> for u32x2[src]

impl FromCast<Simd<[i128; 2]>> for u64x2[src]

impl FromCast<Simd<[i128; 2]>> for u8x2[src]

impl FromCast<Simd<[i128; 2]>> for usizex2[src]

impl FromCast<Simd<[i128; 4]>> for f32x4[src]

impl FromCast<Simd<[i128; 4]>> for f64x4[src]

impl FromCast<Simd<[i128; 4]>> for i16x4[src]

impl FromCast<Simd<[i128; 4]>> for i32x4[src]

impl FromCast<Simd<[i128; 4]>> for i64x4[src]

impl FromCast<Simd<[i128; 4]>> for i8x4[src]

impl FromCast<Simd<[i128; 4]>> for isizex4[src]

impl FromCast<Simd<[i128; 4]>> for m128x4[src]

impl FromCast<Simd<[i128; 4]>> for m16x4[src]

impl FromCast<Simd<[i128; 4]>> for m32x4[src]

impl FromCast<Simd<[i128; 4]>> for m64x4[src]

impl FromCast<Simd<[i128; 4]>> for m8x4[src]

impl FromCast<Simd<[i128; 4]>> for msizex4[src]

impl FromCast<Simd<[i128; 4]>> for u128x4[src]

impl FromCast<Simd<[i128; 4]>> for u16x4[src]

impl FromCast<Simd<[i128; 4]>> for u32x4[src]

impl FromCast<Simd<[i128; 4]>> for u64x4[src]

impl FromCast<Simd<[i128; 4]>> for u8x4[src]

impl FromCast<Simd<[i128; 4]>> for usizex4[src]

impl FromCast<Simd<[i16; 16]>> for f32x16[src]

impl FromCast<Simd<[i16; 16]>> for i32x16[src]

impl FromCast<Simd<[i16; 16]>> for i8x16[src]

impl FromCast<Simd<[i16; 16]>> for m16x16[src]

impl FromCast<Simd<[i16; 16]>> for m32x16[src]

impl FromCast<Simd<[i16; 16]>> for m8x16[src]

impl FromCast<Simd<[i16; 16]>> for u16x16[src]

impl FromCast<Simd<[i16; 16]>> for u32x16[src]

impl FromCast<Simd<[i16; 16]>> for u8x16[src]

impl FromCast<Simd<[i16; 2]>> for f32x2[src]

impl FromCast<Simd<[i16; 2]>> for f64x2[src]

impl FromCast<Simd<[i16; 2]>> for i128x2[src]

impl FromCast<Simd<[i16; 2]>> for i32x2[src]

impl FromCast<Simd<[i16; 2]>> for i64x2[src]

impl FromCast<Simd<[i16; 2]>> for i8x2[src]

impl FromCast<Simd<[i16; 2]>> for isizex2[src]

impl FromCast<Simd<[i16; 2]>> for m128x2[src]

impl FromCast<Simd<[i16; 2]>> for m16x2[src]

impl FromCast<Simd<[i16; 2]>> for m32x2[src]

impl FromCast<Simd<[i16; 2]>> for m64x2[src]

impl FromCast<Simd<[i16; 2]>> for m8x2[src]

impl FromCast<Simd<[i16; 2]>> for msizex2[src]

impl FromCast<Simd<[i16; 2]>> for u128x2[src]

impl FromCast<Simd<[i16; 2]>> for u16x2[src]

impl FromCast<Simd<[i16; 2]>> for u32x2[src]

impl FromCast<Simd<[i16; 2]>> for u64x2[src]

impl FromCast<Simd<[i16; 2]>> for u8x2[src]

impl FromCast<Simd<[i16; 2]>> for usizex2[src]

impl FromCast<Simd<[i16; 32]>> for i8x32[src]

impl FromCast<Simd<[i16; 32]>> for m16x32[src]

impl FromCast<Simd<[i16; 32]>> for m8x32[src]

impl FromCast<Simd<[i16; 32]>> for u16x32[src]

impl FromCast<Simd<[i16; 32]>> for u8x32[src]

impl FromCast<Simd<[i16; 4]>> for f32x4[src]

impl FromCast<Simd<[i16; 4]>> for f64x4[src]

impl FromCast<Simd<[i16; 4]>> for i128x4[src]

impl FromCast<Simd<[i16; 4]>> for i32x4[src]

impl FromCast<Simd<[i16; 4]>> for i64x4[src]

impl FromCast<Simd<[i16; 4]>> for i8x4[src]

impl FromCast<Simd<[i16; 4]>> for isizex4[src]

impl FromCast<Simd<[i16; 4]>> for m128x4[src]

impl FromCast<Simd<[i16; 4]>> for m16x4[src]

impl FromCast<Simd<[i16; 4]>> for m32x4[src]

impl FromCast<Simd<[i16; 4]>> for m64x4[src]

impl FromCast<Simd<[i16; 4]>> for m8x4[src]

impl FromCast<Simd<[i16; 4]>> for msizex4[src]

impl FromCast<Simd<[i16; 4]>> for u128x4[src]

impl FromCast<Simd<[i16; 4]>> for u16x4[src]

impl FromCast<Simd<[i16; 4]>> for u32x4[src]

impl FromCast<Simd<[i16; 4]>> for u64x4[src]

impl FromCast<Simd<[i16; 4]>> for u8x4[src]

impl FromCast<Simd<[i16; 4]>> for usizex4[src]

impl FromCast<Simd<[i16; 8]>> for f32x8[src]

impl FromCast<Simd<[i16; 8]>> for f64x8[src]

impl FromCast<Simd<[i16; 8]>> for i32x8[src]

impl FromCast<Simd<[i16; 8]>> for i64x8[src]

impl FromCast<Simd<[i16; 8]>> for i8x8[src]

impl FromCast<Simd<[i16; 8]>> for isizex8[src]

impl FromCast<Simd<[i16; 8]>> for m16x8[src]

impl FromCast<Simd<[i16; 8]>> for m32x8[src]

impl FromCast<Simd<[i16; 8]>> for m64x8[src]

impl FromCast<Simd<[i16; 8]>> for m8x8[src]

impl FromCast<Simd<[i16; 8]>> for msizex8[src]

impl FromCast<Simd<[i16; 8]>> for u16x8[src]

impl FromCast<Simd<[i16; 8]>> for u32x8[src]

impl FromCast<Simd<[i16; 8]>> for u64x8[src]

impl FromCast<Simd<[i16; 8]>> for u8x8[src]

impl FromCast<Simd<[i16; 8]>> for usizex8[src]

impl FromCast<Simd<[i32; 16]>> for f32x16[src]

impl FromCast<Simd<[i32; 16]>> for i16x16[src]

impl FromCast<Simd<[i32; 16]>> for i8x16[src]

impl FromCast<Simd<[i32; 16]>> for m16x16[src]

impl FromCast<Simd<[i32; 16]>> for m32x16[src]

impl FromCast<Simd<[i32; 16]>> for m8x16[src]

impl FromCast<Simd<[i32; 16]>> for u16x16[src]

impl FromCast<Simd<[i32; 16]>> for u32x16[src]

impl FromCast<Simd<[i32; 16]>> for u8x16[src]

impl FromCast<Simd<[i32; 2]>> for f32x2[src]

impl FromCast<Simd<[i32; 2]>> for f64x2[src]

impl FromCast<Simd<[i32; 2]>> for i128x2[src]

impl FromCast<Simd<[i32; 2]>> for i16x2[src]

impl FromCast<Simd<[i32; 2]>> for i64x2[src]

impl FromCast<Simd<[i32; 2]>> for i8x2[src]

impl FromCast<Simd<[i32; 2]>> for isizex2[src]

impl FromCast<Simd<[i32; 2]>> for m128x2[src]

impl FromCast<Simd<[i32; 2]>> for m16x2[src]

impl FromCast<Simd<[i32; 2]>> for m32x2[src]

impl FromCast<Simd<[i32; 2]>> for m64x2[src]

impl FromCast<Simd<[i32; 2]>> for m8x2[src]

impl FromCast<Simd<[i32; 2]>> for msizex2[src]

impl FromCast<Simd<[i32; 2]>> for u128x2[src]

impl FromCast<Simd<[i32; 2]>> for u16x2[src]

impl FromCast<Simd<[i32; 2]>> for u32x2[src]

impl FromCast<Simd<[i32; 2]>> for u64x2[src]

impl FromCast<Simd<[i32; 2]>> for u8x2[src]

impl FromCast<Simd<[i32; 2]>> for usizex2[src]

impl FromCast<Simd<[i32; 4]>> for f32x4[src]

impl FromCast<Simd<[i32; 4]>> for f64x4[src]

impl FromCast<Simd<[i32; 4]>> for i128x4[src]

impl FromCast<Simd<[i32; 4]>> for i16x4[src]

impl FromCast<Simd<[i32; 4]>> for i64x4[src]

impl FromCast<Simd<[i32; 4]>> for i8x4[src]

impl FromCast<Simd<[i32; 4]>> for isizex4[src]

impl FromCast<Simd<[i32; 4]>> for m128x4[src]

impl FromCast<Simd<[i32; 4]>> for m16x4[src]

impl FromCast<Simd<[i32; 4]>> for m32x4[src]

impl FromCast<Simd<[i32; 4]>> for m64x4[src]

impl FromCast<Simd<[i32; 4]>> for m8x4[src]

impl FromCast<Simd<[i32; 4]>> for msizex4[src]

impl FromCast<Simd<[i32; 4]>> for u128x4[src]

impl FromCast<Simd<[i32; 4]>> for u16x4[src]

impl FromCast<Simd<[i32; 4]>> for u32x4[src]

impl FromCast<Simd<[i32; 4]>> for u64x4[src]

impl FromCast<Simd<[i32; 4]>> for u8x4[src]

impl FromCast<Simd<[i32; 4]>> for usizex4[src]

impl FromCast<Simd<[i32; 8]>> for f32x8[src]

impl FromCast<Simd<[i32; 8]>> for f64x8[src]

impl FromCast<Simd<[i32; 8]>> for i16x8[src]

impl FromCast<Simd<[i32; 8]>> for i64x8[src]

impl FromCast<Simd<[i32; 8]>> for i8x8[src]

impl FromCast<Simd<[i32; 8]>> for isizex8[src]

impl FromCast<Simd<[i32; 8]>> for m16x8[src]

impl FromCast<Simd<[i32; 8]>> for m32x8[src]

impl FromCast<Simd<[i32; 8]>> for m64x8[src]

impl FromCast<Simd<[i32; 8]>> for m8x8[src]

impl FromCast<Simd<[i32; 8]>> for msizex8[src]

impl FromCast<Simd<[i32; 8]>> for u16x8[src]

impl FromCast<Simd<[i32; 8]>> for u32x8[src]

impl FromCast<Simd<[i32; 8]>> for u64x8[src]

impl FromCast<Simd<[i32; 8]>> for u8x8[src]

impl FromCast<Simd<[i32; 8]>> for usizex8[src]

impl FromCast<Simd<[i64; 2]>> for f32x2[src]

impl FromCast<Simd<[i64; 2]>> for f64x2[src]

impl FromCast<Simd<[i64; 2]>> for i128x2[src]

impl FromCast<Simd<[i64; 2]>> for i16x2[src]

impl FromCast<Simd<[i64; 2]>> for i32x2[src]

impl FromCast<Simd<[i64; 2]>> for i8x2[src]

impl FromCast<Simd<[i64; 2]>> for isizex2[src]

impl FromCast<Simd<[i64; 2]>> for m128x2[src]

impl FromCast<Simd<[i64; 2]>> for m16x2[src]

impl FromCast<Simd<[i64; 2]>> for m32x2[src]

impl FromCast<Simd<[i64; 2]>> for m64x2[src]

impl FromCast<Simd<[i64; 2]>> for m8x2[src]

impl FromCast<Simd<[i64; 2]>> for msizex2[src]

impl FromCast<Simd<[i64; 2]>> for u128x2[src]

impl FromCast<Simd<[i64; 2]>> for u16x2[src]

impl FromCast<Simd<[i64; 2]>> for u32x2[src]

impl FromCast<Simd<[i64; 2]>> for u64x2[src]

impl FromCast<Simd<[i64; 2]>> for u8x2[src]

impl FromCast<Simd<[i64; 2]>> for usizex2[src]

impl FromCast<Simd<[i64; 4]>> for f32x4[src]

impl FromCast<Simd<[i64; 4]>> for f64x4[src]

impl FromCast<Simd<[i64; 4]>> for i128x4[src]

impl FromCast<Simd<[i64; 4]>> for i16x4[src]

impl FromCast<Simd<[i64; 4]>> for i32x4[src]

impl FromCast<Simd<[i64; 4]>> for i8x4[src]

impl FromCast<Simd<[i64; 4]>> for isizex4[src]

impl FromCast<Simd<[i64; 4]>> for m128x4[src]

impl FromCast<Simd<[i64; 4]>> for m16x4[src]

impl FromCast<Simd<[i64; 4]>> for m32x4[src]

impl FromCast<Simd<[i64; 4]>> for m64x4[src]

impl FromCast<Simd<[i64; 4]>> for m8x4[src]

impl FromCast<Simd<[i64; 4]>> for msizex4[src]

impl FromCast<Simd<[i64; 4]>> for u128x4[src]

impl FromCast<Simd<[i64; 4]>> for u16x4[src]

impl FromCast<Simd<[i64; 4]>> for u32x4[src]

impl FromCast<Simd<[i64; 4]>> for u64x4[src]

impl FromCast<Simd<[i64; 4]>> for u8x4[src]

impl FromCast<Simd<[i64; 4]>> for usizex4[src]

impl FromCast<Simd<[i64; 8]>> for f32x8[src]

impl FromCast<Simd<[i64; 8]>> for f64x8[src]

impl FromCast<Simd<[i64; 8]>> for i16x8[src]

impl FromCast<Simd<[i64; 8]>> for i32x8[src]

impl FromCast<Simd<[i64; 8]>> for i8x8[src]

impl FromCast<Simd<[i64; 8]>> for isizex8[src]

impl FromCast<Simd<[i64; 8]>> for m16x8[src]

impl FromCast<Simd<[i64; 8]>> for m32x8[src]

impl FromCast<Simd<[i64; 8]>> for m64x8[src]

impl FromCast<Simd<[i64; 8]>> for m8x8[src]

impl FromCast<Simd<[i64; 8]>> for msizex8[src]

impl FromCast<Simd<[i64; 8]>> for u16x8[src]

impl FromCast<Simd<[i64; 8]>> for u32x8[src]

impl FromCast<Simd<[i64; 8]>> for u64x8[src]

impl FromCast<Simd<[i64; 8]>> for u8x8[src]

impl FromCast<Simd<[i64; 8]>> for usizex8[src]

impl FromCast<Simd<[i8; 16]>> for f32x16[src]

impl FromCast<Simd<[i8; 16]>> for i16x16[src]

impl FromCast<Simd<[i8; 16]>> for i32x16[src]

impl FromCast<Simd<[i8; 16]>> for m16x16[src]

impl FromCast<Simd<[i8; 16]>> for m32x16[src]

impl FromCast<Simd<[i8; 16]>> for m8x16[src]

impl FromCast<Simd<[i8; 16]>> for u16x16[src]

impl FromCast<Simd<[i8; 16]>> for u32x16[src]

impl FromCast<Simd<[i8; 16]>> for u8x16[src]

impl FromCast<Simd<[i8; 2]>> for f32x2[src]

impl FromCast<Simd<[i8; 2]>> for f64x2[src]

impl FromCast<Simd<[i8; 2]>> for i128x2[src]

impl FromCast<Simd<[i8; 2]>> for i16x2[src]

impl FromCast<Simd<[i8; 2]>> for i32x2[src]

impl FromCast<Simd<[i8; 2]>> for i64x2[src]

impl FromCast<Simd<[i8; 2]>> for isizex2[src]

impl FromCast<Simd<[i8; 2]>> for m128x2[src]

impl FromCast<Simd<[i8; 2]>> for m16x2[src]

impl FromCast<Simd<[i8; 2]>> for m32x2[src]

impl FromCast<Simd<[i8; 2]>> for m64x2[src]

impl FromCast<Simd<[i8; 2]>> for m8x2[src]

impl FromCast<Simd<[i8; 2]>> for msizex2[src]

impl FromCast<Simd<[i8; 2]>> for u128x2[src]

impl FromCast<Simd<[i8; 2]>> for u16x2[src]

impl FromCast<Simd<[i8; 2]>> for u32x2[src]

impl FromCast<Simd<[i8; 2]>> for u64x2[src]

impl FromCast<Simd<[i8; 2]>> for u8x2[src]

impl FromCast<Simd<[i8; 2]>> for usizex2[src]

impl FromCast<Simd<[i8; 32]>> for i16x32[src]

impl FromCast<Simd<[i8; 32]>> for m16x32[src]

impl FromCast<Simd<[i8; 32]>> for m8x32[src]

impl FromCast<Simd<[i8; 32]>> for u16x32[src]

impl FromCast<Simd<[i8; 32]>> for u8x32[src]

impl FromCast<Simd<[i8; 4]>> for f32x4[src]

impl FromCast<Simd<[i8; 4]>> for f64x4[src]

impl FromCast<Simd<[i8; 4]>> for i128x4[src]

impl FromCast<Simd<[i8; 4]>> for i16x4[src]

impl FromCast<Simd<[i8; 4]>> for i32x4[src]

impl FromCast<Simd<[i8; 4]>> for i64x4[src]

impl FromCast<Simd<[i8; 4]>> for isizex4[src]

impl FromCast<Simd<[i8; 4]>> for m128x4[src]

impl FromCast<Simd<[i8; 4]>> for m16x4[src]

impl FromCast<Simd<[i8; 4]>> for m32x4[src]

impl FromCast<Simd<[i8; 4]>> for m64x4[src]

impl FromCast<Simd<[i8; 4]>> for m8x4[src]

impl FromCast<Simd<[i8; 4]>> for msizex4[src]

impl FromCast<Simd<[i8; 4]>> for u128x4[src]

impl FromCast<Simd<[i8; 4]>> for u16x4[src]

impl FromCast<Simd<[i8; 4]>> for u32x4[src]

impl FromCast<Simd<[i8; 4]>> for u64x4[src]

impl FromCast<Simd<[i8; 4]>> for u8x4[src]

impl FromCast<Simd<[i8; 4]>> for usizex4[src]

impl FromCast<Simd<[i8; 64]>> for m8x64[src]

impl FromCast<Simd<[i8; 64]>> for u8x64[src]

impl FromCast<Simd<[i8; 8]>> for f32x8[src]

impl FromCast<Simd<[i8; 8]>> for f64x8[src]

impl FromCast<Simd<[i8; 8]>> for i16x8[src]

impl FromCast<Simd<[i8; 8]>> for i32x8[src]

impl FromCast<Simd<[i8; 8]>> for i64x8[src]

impl FromCast<Simd<[i8; 8]>> for isizex8[src]

impl FromCast<Simd<[i8; 8]>> for m16x8[src]

impl FromCast<Simd<[i8; 8]>> for m32x8[src]

impl FromCast<Simd<[i8; 8]>> for m64x8[src]

impl FromCast<Simd<[i8; 8]>> for m8x8[src]

impl FromCast<Simd<[i8; 8]>> for msizex8[src]

impl FromCast<Simd<[i8; 8]>> for u16x8[src]

impl FromCast<Simd<[i8; 8]>> for u32x8[src]

impl FromCast<Simd<[i8; 8]>> for u64x8[src]

impl FromCast<Simd<[i8; 8]>> for u8x8[src]

impl FromCast<Simd<[i8; 8]>> for usizex8[src]

impl FromCast<Simd<[isize; 2]>> for f32x2[src]

impl FromCast<Simd<[isize; 2]>> for f64x2[src]

impl FromCast<Simd<[isize; 2]>> for i128x2[src]

impl FromCast<Simd<[isize; 2]>> for i16x2[src]

impl FromCast<Simd<[isize; 2]>> for i32x2[src]

impl FromCast<Simd<[isize; 2]>> for i64x2[src]

impl FromCast<Simd<[isize; 2]>> for i8x2[src]

impl FromCast<Simd<[isize; 2]>> for m128x2[src]

impl FromCast<Simd<[isize; 2]>> for m16x2[src]

impl FromCast<Simd<[isize; 2]>> for m32x2[src]

impl FromCast<Simd<[isize; 2]>> for m64x2[src]

impl FromCast<Simd<[isize; 2]>> for m8x2[src]

impl FromCast<Simd<[isize; 2]>> for msizex2[src]

impl FromCast<Simd<[isize; 2]>> for u128x2[src]

impl FromCast<Simd<[isize; 2]>> for u16x2[src]

impl FromCast<Simd<[isize; 2]>> for u32x2[src]

impl FromCast<Simd<[isize; 2]>> for u64x2[src]

impl FromCast<Simd<[isize; 2]>> for u8x2[src]

impl FromCast<Simd<[isize; 2]>> for usizex2[src]

impl FromCast<Simd<[isize; 4]>> for f32x4[src]

impl FromCast<Simd<[isize; 4]>> for f64x4[src]

impl FromCast<Simd<[isize; 4]>> for i128x4[src]

impl FromCast<Simd<[isize; 4]>> for i16x4[src]

impl FromCast<Simd<[isize; 4]>> for i32x4[src]

impl FromCast<Simd<[isize; 4]>> for i64x4[src]

impl FromCast<Simd<[isize; 4]>> for i8x4[src]

impl FromCast<Simd<[isize; 4]>> for m128x4[src]

impl FromCast<Simd<[isize; 4]>> for m16x4[src]

impl FromCast<Simd<[isize; 4]>> for m32x4[src]

impl FromCast<Simd<[isize; 4]>> for m64x4[src]

impl FromCast<Simd<[isize; 4]>> for m8x4[src]

impl FromCast<Simd<[isize; 4]>> for msizex4[src]

impl FromCast<Simd<[isize; 4]>> for u128x4[src]

impl FromCast<Simd<[isize; 4]>> for u16x4[src]

impl FromCast<Simd<[isize; 4]>> for u32x4[src]

impl FromCast<Simd<[isize; 4]>> for u64x4[src]

impl FromCast<Simd<[isize; 4]>> for u8x4[src]

impl FromCast<Simd<[isize; 4]>> for usizex4[src]

impl FromCast<Simd<[isize; 8]>> for f32x8[src]

impl FromCast<Simd<[isize; 8]>> for f64x8[src]

impl FromCast<Simd<[isize; 8]>> for i16x8[src]

impl FromCast<Simd<[isize; 8]>> for i32x8[src]

impl FromCast<Simd<[isize; 8]>> for i64x8[src]

impl FromCast<Simd<[isize; 8]>> for i8x8[src]

impl FromCast<Simd<[isize; 8]>> for m16x8[src]

impl FromCast<Simd<[isize; 8]>> for m32x8[src]

impl FromCast<Simd<[isize; 8]>> for m64x8[src]

impl FromCast<Simd<[isize; 8]>> for m8x8[src]

impl FromCast<Simd<[isize; 8]>> for msizex8[src]

impl FromCast<Simd<[isize; 8]>> for u16x8[src]

impl FromCast<Simd<[isize; 8]>> for u32x8[src]

impl FromCast<Simd<[isize; 8]>> for u64x8[src]

impl FromCast<Simd<[isize; 8]>> for u8x8[src]

impl FromCast<Simd<[isize; 8]>> for usizex8[src]

impl FromCast<Simd<[u128; 1]>> for i128x1[src]

impl FromCast<Simd<[u128; 1]>> for m128x1[src]

impl FromCast<Simd<[u128; 2]>> for f32x2[src]

impl FromCast<Simd<[u128; 2]>> for f64x2[src]

impl FromCast<Simd<[u128; 2]>> for i128x2[src]

impl FromCast<Simd<[u128; 2]>> for i16x2[src]

impl FromCast<Simd<[u128; 2]>> for i32x2[src]

impl FromCast<Simd<[u128; 2]>> for i64x2[src]

impl FromCast<Simd<[u128; 2]>> for i8x2[src]

impl FromCast<Simd<[u128; 2]>> for isizex2[src]

impl FromCast<Simd<[u128; 2]>> for m128x2[src]

impl FromCast<Simd<[u128; 2]>> for m16x2[src]

impl FromCast<Simd<[u128; 2]>> for m32x2[src]

impl FromCast<Simd<[u128; 2]>> for m64x2[src]

impl FromCast<Simd<[u128; 2]>> for m8x2[src]

impl FromCast<Simd<[u128; 2]>> for msizex2[src]

impl FromCast<Simd<[u128; 2]>> for u16x2[src]

impl FromCast<Simd<[u128; 2]>> for u32x2[src]

impl FromCast<Simd<[u128; 2]>> for u64x2[src]

impl FromCast<Simd<[u128; 2]>> for u8x2[src]

impl FromCast<Simd<[u128; 2]>> for usizex2[src]

impl FromCast<Simd<[u128; 4]>> for f32x4[src]

impl FromCast<Simd<[u128; 4]>> for f64x4[src]

impl FromCast<Simd<[u128; 4]>> for i128x4[src]

impl FromCast<Simd<[u128; 4]>> for i16x4[src]

impl FromCast<Simd<[u128; 4]>> for i32x4[src]

impl FromCast<Simd<[u128; 4]>> for i64x4[src]

impl FromCast<Simd<[u128; 4]>> for i8x4[src]

impl FromCast<Simd<[u128; 4]>> for isizex4[src]

impl FromCast<Simd<[u128; 4]>> for m128x4[src]

impl FromCast<Simd<[u128; 4]>> for m16x4[src]

impl FromCast<Simd<[u128; 4]>> for m32x4[src]

impl FromCast<Simd<[u128; 4]>> for m64x4[src]

impl FromCast<Simd<[u128; 4]>> for m8x4[src]

impl FromCast<Simd<[u128; 4]>> for msizex4[src]

impl FromCast<Simd<[u128; 4]>> for u16x4[src]

impl FromCast<Simd<[u128; 4]>> for u32x4[src]

impl FromCast<Simd<[u128; 4]>> for u64x4[src]

impl FromCast<Simd<[u128; 4]>> for u8x4[src]

impl FromCast<Simd<[u128; 4]>> for usizex4[src]

impl FromCast<Simd<[u16; 16]>> for f32x16[src]

impl FromCast<Simd<[u16; 16]>> for i16x16[src]

impl FromCast<Simd<[u16; 16]>> for i32x16[src]

impl FromCast<Simd<[u16; 16]>> for i8x16[src]

impl FromCast<Simd<[u16; 16]>> for m16x16[src]

impl FromCast<Simd<[u16; 16]>> for m32x16[src]

impl FromCast<Simd<[u16; 16]>> for m8x16[src]

impl FromCast<Simd<[u16; 16]>> for u32x16[src]

impl FromCast<Simd<[u16; 16]>> for u8x16[src]

impl FromCast<Simd<[u16; 2]>> for f32x2[src]

impl FromCast<Simd<[u16; 2]>> for f64x2[src]

impl FromCast<Simd<[u16; 2]>> for i128x2[src]

impl FromCast<Simd<[u16; 2]>> for i16x2[src]

impl FromCast<Simd<[u16; 2]>> for i32x2[src]

impl FromCast<Simd<[u16; 2]>> for i64x2[src]

impl FromCast<Simd<[u16; 2]>> for i8x2[src]

impl FromCast<Simd<[u16; 2]>> for isizex2[src]

impl FromCast<Simd<[u16; 2]>> for m128x2[src]

impl FromCast<Simd<[u16; 2]>> for m16x2[src]

impl FromCast<Simd<[u16; 2]>> for m32x2[src]

impl FromCast<Simd<[u16; 2]>> for m64x2[src]

impl FromCast<Simd<[u16; 2]>> for m8x2[src]

impl FromCast<Simd<[u16; 2]>> for msizex2[src]

impl FromCast<Simd<[u16; 2]>> for u128x2[src]

impl FromCast<Simd<[u16; 2]>> for u32x2[src]

impl FromCast<Simd<[u16; 2]>> for u64x2[src]

impl FromCast<Simd<[u16; 2]>> for u8x2[src]

impl FromCast<Simd<[u16; 2]>> for usizex2[src]

impl FromCast<Simd<[u16; 32]>> for i16x32[src]

impl FromCast<Simd<[u16; 32]>> for i8x32[src]

impl FromCast<Simd<[u16; 32]>> for m16x32[src]

impl FromCast<Simd<[u16; 32]>> for m8x32[src]

impl FromCast<Simd<[u16; 32]>> for u8x32[src]

impl FromCast<Simd<[u16; 4]>> for f32x4[src]

impl FromCast<Simd<[u16; 4]>> for f64x4[src]

impl FromCast<Simd<[u16; 4]>> for i128x4[src]

impl FromCast<Simd<[u16; 4]>> for i16x4[src]

impl FromCast<Simd<[u16; 4]>> for i32x4[src]

impl FromCast<Simd<[u16; 4]>> for i64x4[src]

impl FromCast<Simd<[u16; 4]>> for i8x4[src]

impl FromCast<Simd<[u16; 4]>> for isizex4[src]

impl FromCast<Simd<[u16; 4]>> for m128x4[src]

impl FromCast<Simd<[u16; 4]>> for m16x4[src]

impl FromCast<Simd<[u16; 4]>> for m32x4[src]

impl FromCast<Simd<[u16; 4]>> for m64x4[src]

impl FromCast<Simd<[u16; 4]>> for m8x4[src]

impl FromCast<Simd<[u16; 4]>> for msizex4[src]

impl FromCast<Simd<[u16; 4]>> for u128x4[src]

impl FromCast<Simd<[u16; 4]>> for u32x4[src]

impl FromCast<Simd<[u16; 4]>> for u64x4[src]

impl FromCast<Simd<[u16; 4]>> for u8x4[src]

impl FromCast<Simd<[u16; 4]>> for usizex4[src]

impl FromCast<Simd<[u16; 8]>> for f32x8[src]

impl FromCast<Simd<[u16; 8]>> for f64x8[src]

impl FromCast<Simd<[u16; 8]>> for i16x8[src]

impl FromCast<Simd<[u16; 8]>> for i32x8[src]

impl FromCast<Simd<[u16; 8]>> for i64x8[src]

impl FromCast<Simd<[u16; 8]>> for i8x8[src]

impl FromCast<Simd<[u16; 8]>> for isizex8[src]

impl FromCast<Simd<[u16; 8]>> for m16x8[src]

impl FromCast<Simd<[u16; 8]>> for m32x8[src]

impl FromCast<Simd<[u16; 8]>> for m64x8[src]

impl FromCast<Simd<[u16; 8]>> for m8x8[src]

impl FromCast<Simd<[u16; 8]>> for msizex8[src]

impl FromCast<Simd<[u16; 8]>> for u32x8[src]

impl FromCast<Simd<[u16; 8]>> for u64x8[src]

impl FromCast<Simd<[u16; 8]>> for u8x8[src]

impl FromCast<Simd<[u16; 8]>> for usizex8[src]

impl FromCast<Simd<[u32; 16]>> for f32x16[src]

impl FromCast<Simd<[u32; 16]>> for i16x16[src]

impl FromCast<Simd<[u32; 16]>> for i32x16[src]

impl FromCast<Simd<[u32; 16]>> for i8x16[src]

impl FromCast<Simd<[u32; 16]>> for m16x16[src]

impl FromCast<Simd<[u32; 16]>> for m32x16[src]

impl FromCast<Simd<[u32; 16]>> for m8x16[src]

impl FromCast<Simd<[u32; 16]>> for u16x16[src]

impl FromCast<Simd<[u32; 16]>> for u8x16[src]

impl FromCast<Simd<[u32; 2]>> for f32x2[src]

impl FromCast<Simd<[u32; 2]>> for f64x2[src]

impl FromCast<Simd<[u32; 2]>> for i128x2[src]

impl FromCast<Simd<[u32; 2]>> for i16x2[src]

impl FromCast<Simd<[u32; 2]>> for i32x2[src]

impl FromCast<Simd<[u32; 2]>> for i64x2[src]

impl FromCast<Simd<[u32; 2]>> for i8x2[src]

impl FromCast<Simd<[u32; 2]>> for isizex2[src]

impl FromCast<Simd<[u32; 2]>> for m128x2[src]

impl FromCast<Simd<[u32; 2]>> for m16x2[src]

impl FromCast<Simd<[u32; 2]>> for m32x2[src]

impl FromCast<Simd<[u32; 2]>> for m64x2[src]

impl FromCast<Simd<[u32; 2]>> for m8x2[src]

impl FromCast<Simd<[u32; 2]>> for msizex2[src]

impl FromCast<Simd<[u32; 2]>> for u128x2[src]

impl FromCast<Simd<[u32; 2]>> for u16x2[src]

impl FromCast<Simd<[u32; 2]>> for u64x2[src]

impl FromCast<Simd<[u32; 2]>> for u8x2[src]

impl FromCast<Simd<[u32; 2]>> for usizex2[src]

impl FromCast<Simd<[u32; 4]>> for f32x4[src]

impl FromCast<Simd<[u32; 4]>> for f64x4[src]

impl FromCast<Simd<[u32; 4]>> for i128x4[src]

impl FromCast<Simd<[u32; 4]>> for i16x4[src]

impl FromCast<Simd<[u32; 4]>> for i32x4[src]

impl FromCast<Simd<[u32; 4]>> for i64x4[src]

impl FromCast<Simd<[u32; 4]>> for i8x4[src]

impl FromCast<Simd<[u32; 4]>> for isizex4[src]

impl FromCast<Simd<[u32; 4]>> for m128x4[src]

impl FromCast<Simd<[u32; 4]>> for m16x4[src]

impl FromCast<Simd<[u32; 4]>> for m32x4[src]

impl FromCast<Simd<[u32; 4]>> for m64x4[src]

impl FromCast<Simd<[u32; 4]>> for m8x4[src]

impl FromCast<Simd<[u32; 4]>> for msizex4[src]

impl FromCast<Simd<[u32; 4]>> for u128x4[src]

impl FromCast<Simd<[u32; 4]>> for u16x4[src]

impl FromCast<Simd<[u32; 4]>> for u64x4[src]

impl FromCast<Simd<[u32; 4]>> for u8x4[src]

impl FromCast<Simd<[u32; 4]>> for usizex4[src]

impl FromCast<Simd<[u32; 8]>> for f32x8[src]

impl FromCast<Simd<[u32; 8]>> for f64x8[src]

impl FromCast<Simd<[u32; 8]>> for i16x8[src]

impl FromCast<Simd<[u32; 8]>> for i32x8[src]

impl FromCast<Simd<[u32; 8]>> for i64x8[src]

impl FromCast<Simd<[u32; 8]>> for i8x8[src]

impl FromCast<Simd<[u32; 8]>> for isizex8[src]

impl FromCast<Simd<[u32; 8]>> for m16x8[src]

impl FromCast<Simd<[u32; 8]>> for m32x8[src]

impl FromCast<Simd<[u32; 8]>> for m64x8[src]

impl FromCast<Simd<[u32; 8]>> for m8x8[src]

impl FromCast<Simd<[u32; 8]>> for msizex8[src]

impl FromCast<Simd<[u32; 8]>> for u16x8[src]

impl FromCast<Simd<[u32; 8]>> for u64x8[src]

impl FromCast<Simd<[u32; 8]>> for u8x8[src]

impl FromCast<Simd<[u32; 8]>> for usizex8[src]

impl FromCast<Simd<[u64; 2]>> for f32x2[src]

impl FromCast<Simd<[u64; 2]>> for f64x2[src]

impl FromCast<Simd<[u64; 2]>> for i128x2[src]

impl FromCast<Simd<[u64; 2]>> for i16x2[src]

impl FromCast<Simd<[u64; 2]>> for i32x2[src]

impl FromCast<Simd<[u64; 2]>> for i64x2[src]

impl FromCast<Simd<[u64; 2]>> for i8x2[src]

impl FromCast<Simd<[u64; 2]>> for isizex2[src]

impl FromCast<Simd<[u64; 2]>> for m128x2[src]

impl FromCast<Simd<[u64; 2]>> for m16x2[src]

impl FromCast<Simd<[u64; 2]>> for m32x2[src]

impl FromCast<Simd<[u64; 2]>> for m64x2[src]

impl FromCast<Simd<[u64; 2]>> for m8x2[src]

impl FromCast<Simd<[u64; 2]>> for msizex2[src]

impl FromCast<Simd<[u64; 2]>> for u128x2[src]

impl FromCast<Simd<[u64; 2]>> for u16x2[src]

impl FromCast<Simd<[u64; 2]>> for u32x2[src]

impl FromCast<Simd<[u64; 2]>> for u8x2[src]

impl FromCast<Simd<[u64; 2]>> for usizex2[src]

impl FromCast<Simd<[u64; 4]>> for f32x4[src]

impl FromCast<Simd<[u64; 4]>> for f64x4[src]

impl FromCast<Simd<[u64; 4]>> for i128x4[src]

impl FromCast<Simd<[u64; 4]>> for i16x4[src]

impl FromCast<Simd<[u64; 4]>> for i32x4[src]

impl FromCast<Simd<[u64; 4]>> for i64x4[src]

impl FromCast<Simd<[u64; 4]>> for i8x4[src]

impl FromCast<Simd<[u64; 4]>> for isizex4[src]

impl FromCast<Simd<[u64; 4]>> for m128x4[src]

impl FromCast<Simd<[u64; 4]>> for m16x4[src]

impl FromCast<Simd<[u64; 4]>> for m32x4[src]

impl FromCast<Simd<[u64; 4]>> for m64x4[src]

impl FromCast<Simd<[u64; 4]>> for m8x4[src]

impl FromCast<Simd<[u64; 4]>> for msizex4[src]

impl FromCast<Simd<[u64; 4]>> for u128x4[src]

impl FromCast<Simd<[u64; 4]>> for u16x4[src]

impl FromCast<Simd<[u64; 4]>> for u32x4[src]

impl FromCast<Simd<[u64; 4]>> for u8x4[src]

impl FromCast<Simd<[u64; 4]>> for usizex4[src]

impl FromCast<Simd<[u64; 8]>> for f32x8[src]

impl FromCast<Simd<[u64; 8]>> for f64x8[src]

impl FromCast<Simd<[u64; 8]>> for i16x8[src]

impl FromCast<Simd<[u64; 8]>> for i32x8[src]

impl FromCast<Simd<[u64; 8]>> for i64x8[src]

impl FromCast<Simd<[u64; 8]>> for i8x8[src]

impl FromCast<Simd<[u64; 8]>> for isizex8[src]

impl FromCast<Simd<[u64; 8]>> for m16x8[src]

impl FromCast<Simd<[u64; 8]>> for m32x8[src]

impl FromCast<Simd<[u64; 8]>> for m64x8[src]

impl FromCast<Simd<[u64; 8]>> for m8x8[src]

impl FromCast<Simd<[u64; 8]>> for msizex8[src]

impl FromCast<Simd<[u64; 8]>> for u16x8[src]

impl FromCast<Simd<[u64; 8]>> for u32x8[src]

impl FromCast<Simd<[u64; 8]>> for u8x8[src]

impl FromCast<Simd<[u64; 8]>> for usizex8[src]

impl FromCast<Simd<[u8; 16]>> for f32x16[src]

impl FromCast<Simd<[u8; 16]>> for i16x16[src]

impl FromCast<Simd<[u8; 16]>> for i32x16[src]

impl FromCast<Simd<[u8; 16]>> for i8x16[src]

impl FromCast<Simd<[u8; 16]>> for m16x16[src]

impl FromCast<Simd<[u8; 16]>> for m32x16[src]

impl FromCast<Simd<[u8; 16]>> for m8x16[src]

impl FromCast<Simd<[u8; 16]>> for u16x16[src]

impl FromCast<Simd<[u8; 16]>> for u32x16[src]

impl FromCast<Simd<[u8; 2]>> for f32x2[src]

impl FromCast<Simd<[u8; 2]>> for f64x2[src]

impl FromCast<Simd<[u8; 2]>> for i128x2[src]

impl FromCast<Simd<[u8; 2]>> for i16x2[src]

impl FromCast<Simd<[u8; 2]>> for i32x2[src]

impl FromCast<Simd<[u8; 2]>> for i64x2[src]

impl FromCast<Simd<[u8; 2]>> for i8x2[src]

impl FromCast<Simd<[u8; 2]>> for isizex2[src]

impl FromCast<Simd<[u8; 2]>> for m128x2[src]

impl FromCast<Simd<[u8; 2]>> for m16x2[src]

impl FromCast<Simd<[u8; 2]>> for m32x2[src]

impl FromCast<Simd<[u8; 2]>> for m64x2[src]

impl FromCast<Simd<[u8; 2]>> for m8x2[src]

impl FromCast<Simd<[u8; 2]>> for msizex2[src]

impl FromCast<Simd<[u8; 2]>> for u128x2[src]

impl FromCast<Simd<[u8; 2]>> for u16x2[src]

impl FromCast<Simd<[u8; 2]>> for u32x2[src]

impl FromCast<Simd<[u8; 2]>> for u64x2[src]

impl FromCast<Simd<[u8; 2]>> for usizex2[src]

impl FromCast<Simd<[u8; 32]>> for i16x32[src]

impl FromCast<Simd<[u8; 32]>> for i8x32[src]

impl FromCast<Simd<[u8; 32]>> for m16x32[src]

impl FromCast<Simd<[u8; 32]>> for m8x32[src]

impl FromCast<Simd<[u8; 32]>> for u16x32[src]

impl FromCast<Simd<[u8; 4]>> for f32x4[src]

impl FromCast<Simd<[u8; 4]>> for f64x4[src]

impl FromCast<Simd<[u8; 4]>> for i128x4[src]

impl FromCast<Simd<[u8; 4]>> for i16x4[src]

impl FromCast<Simd<[u8; 4]>> for i32x4[src]

impl FromCast<Simd<[u8; 4]>> for i64x4[src]

impl FromCast<Simd<[u8; 4]>> for i8x4[src]

impl FromCast<Simd<[u8; 4]>> for isizex4[src]

impl FromCast<Simd<[u8; 4]>> for m128x4[src]

impl FromCast<Simd<[u8; 4]>> for m16x4[src]

impl FromCast<Simd<[u8; 4]>> for m32x4[src]

impl FromCast<Simd<[u8; 4]>> for m64x4[src]

impl FromCast<Simd<[u8; 4]>> for m8x4[src]

impl FromCast<Simd<[u8; 4]>> for msizex4[src]

impl FromCast<Simd<[u8; 4]>> for u128x4[src]

impl FromCast<Simd<[u8; 4]>> for u16x4[src]

impl FromCast<Simd<[u8; 4]>> for u32x4[src]

impl FromCast<Simd<[u8; 4]>> for u64x4[src]

impl FromCast<Simd<[u8; 4]>> for usizex4[src]

impl FromCast<Simd<[u8; 64]>> for i8x64[src]

impl FromCast<Simd<[u8; 64]>> for m8x64[src]

impl FromCast<Simd<[u8; 8]>> for f32x8[src]

impl FromCast<Simd<[u8; 8]>> for f64x8[src]

impl FromCast<Simd<[u8; 8]>> for i16x8[src]

impl FromCast<Simd<[u8; 8]>> for i32x8[src]

impl FromCast<Simd<[u8; 8]>> for i64x8[src]

impl FromCast<Simd<[u8; 8]>> for i8x8[src]

impl FromCast<Simd<[u8; 8]>> for isizex8[src]

impl FromCast<Simd<[u8; 8]>> for m16x8[src]

impl FromCast<Simd<[u8; 8]>> for m32x8[src]

impl FromCast<Simd<[u8; 8]>> for m64x8[src]

impl FromCast<Simd<[u8; 8]>> for m8x8[src]

impl FromCast<Simd<[u8; 8]>> for msizex8[src]

impl FromCast<Simd<[u8; 8]>> for u16x8[src]

impl FromCast<Simd<[u8; 8]>> for u32x8[src]

impl FromCast<Simd<[u8; 8]>> for u64x8[src]

impl FromCast<Simd<[u8; 8]>> for usizex8[src]

impl FromCast<Simd<[usize; 2]>> for f32x2[src]

impl FromCast<Simd<[usize; 2]>> for f64x2[src]

impl FromCast<Simd<[usize; 2]>> for i128x2[src]

impl FromCast<Simd<[usize; 2]>> for i16x2[src]

impl FromCast<Simd<[usize; 2]>> for i32x2[src]

impl FromCast<Simd<[usize; 2]>> for i64x2[src]

impl FromCast<Simd<[usize; 2]>> for i8x2[src]

impl FromCast<Simd<[usize; 2]>> for isizex2[src]

impl FromCast<Simd<[usize; 2]>> for m128x2[src]

impl FromCast<Simd<[usize; 2]>> for m16x2[src]

impl FromCast<Simd<[usize; 2]>> for m32x2[src]

impl FromCast<Simd<[usize; 2]>> for m64x2[src]

impl FromCast<Simd<[usize; 2]>> for m8x2[src]

impl FromCast<Simd<[usize; 2]>> for msizex2[src]

impl FromCast<Simd<[usize; 2]>> for u128x2[src]

impl FromCast<Simd<[usize; 2]>> for u16x2[src]

impl FromCast<Simd<[usize; 2]>> for u32x2[src]

impl FromCast<Simd<[usize; 2]>> for u64x2[src]

impl FromCast<Simd<[usize; 2]>> for u8x2[src]

impl FromCast<Simd<[usize; 4]>> for f32x4[src]

impl FromCast<Simd<[usize; 4]>> for f64x4[src]

impl FromCast<Simd<[usize; 4]>> for i128x4[src]

impl FromCast<Simd<[usize; 4]>> for i16x4[src]

impl FromCast<Simd<[usize; 4]>> for i32x4[src]

impl FromCast<Simd<[usize; 4]>> for i64x4[src]

impl FromCast<Simd<[usize; 4]>> for i8x4[src]

impl FromCast<Simd<[usize; 4]>> for isizex4[src]

impl FromCast<Simd<[usize; 4]>> for m128x4[src]

impl FromCast<Simd<[usize; 4]>> for m16x4[src]

impl FromCast<Simd<[usize; 4]>> for m32x4[src]

impl FromCast<Simd<[usize; 4]>> for m64x4[src]

impl FromCast<Simd<[usize; 4]>> for m8x4[src]

impl FromCast<Simd<[usize; 4]>> for msizex4[src]

impl FromCast<Simd<[usize; 4]>> for u128x4[src]

impl FromCast<Simd<[usize; 4]>> for u16x4[src]

impl FromCast<Simd<[usize; 4]>> for u32x4[src]

impl FromCast<Simd<[usize; 4]>> for u64x4[src]

impl FromCast<Simd<[usize; 4]>> for u8x4[src]

impl FromCast<Simd<[usize; 8]>> for f32x8[src]

impl FromCast<Simd<[usize; 8]>> for f64x8[src]

impl FromCast<Simd<[usize; 8]>> for i16x8[src]

impl FromCast<Simd<[usize; 8]>> for i32x8[src]

impl FromCast<Simd<[usize; 8]>> for i64x8[src]

impl FromCast<Simd<[usize; 8]>> for i8x8[src]

impl FromCast<Simd<[usize; 8]>> for isizex8[src]

impl FromCast<Simd<[usize; 8]>> for m16x8[src]

impl FromCast<Simd<[usize; 8]>> for m32x8[src]

impl FromCast<Simd<[usize; 8]>> for m64x8[src]

impl FromCast<Simd<[usize; 8]>> for m8x8[src]

impl FromCast<Simd<[usize; 8]>> for msizex8[src]

impl FromCast<Simd<[usize; 8]>> for u16x8[src]

impl FromCast<Simd<[usize; 8]>> for u32x8[src]

impl FromCast<Simd<[usize; 8]>> for u64x8[src]

impl FromCast<Simd<[usize; 8]>> for u8x8[src]

impl<T> FromCast<T> for T[src]

FromCast and Cast are reflexive.

+
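For illustration only (a sketch, not part of the generated page), the cast API in use; it assumes the crate's i32x4/u32x4 aliases and the Cast/FromCast pair documented here:

use packed_simd::{i32x4, u32x4, Cast, FromCast};

let x = i32x4::new(-1, 0, 1, 2);
// lane-wise numeric cast, like `as` applied per lane
let y = u32x4::from_cast(x);
assert_eq!(y.extract(2), 1);
// `Cast` is the `Into`-style counterpart of `FromCast`
let z: u32x4 = x.cast();
assert_eq!(y, z);
// reflexivity: casting to the same type is the identity
assert_eq!(i32x4::from_cast(x), x);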
\ No newline at end of file diff --git a/packed_simd/trait.IntoBits.html b/packed_simd/trait.IntoBits.html new file mode 100644 index 000000000..f966a1726 --- /dev/null +++ b/packed_simd/trait.IntoBits.html @@ -0,0 +1,8 @@ +packed_simd::IntoBits - Rust

[][src]Trait packed_simd::IntoBits

pub trait IntoBits<T>: Sized {
+    fn into_bits(self) -> T;
+}

Safe lossless bitwise conversion from Self to T.

+
+

Required methods

fn into_bits(self) -> T

Safe lossless bitwise transmute from self to T.

+

Implementors

impl<T, U> IntoBits<U> for T where
    U: FromBits<T>, 
[src]

FromBits implies IntoBits.

+
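A hedged sketch of a bitwise conversion; it assumes the crate provides u32x4: FromBits<f32x4> for same-width vectors (an assumption; that impl is not shown on this page):

use packed_simd::{f32x4, u32x4, IntoBits};

// reinterpret the lanes bitwise, without changing any bits
let x = f32x4::splat(1.0);
let y: u32x4 = x.into_bits();
// 1.0f32 is 0x3f80_0000 in IEEE-754 bits
assert_eq!(y.extract(0), 0x3f80_0000);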
\ No newline at end of file diff --git a/packed_simd/trait.Mask.html b/packed_simd/trait.Mask.html new file mode 100644 index 000000000..b6e4f4835 --- /dev/null +++ b/packed_simd/trait.Mask.html @@ -0,0 +1,6 @@ +packed_simd::Mask - Rust

[][src]Trait packed_simd::Mask

pub trait Mask: Seal {
+    fn test(&self) -> bool;
+}

This trait is implemented by all mask types.

+
+

Required methods

fn test(&self) -> bool
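test reports whether the mask is set. A minimal sketch, assuming the scalar masks expose a bool-taking new constructor (an assumption; it is not shown on this page):

use packed_simd::{m8, Mask};

assert!(m8::new(true).test());
assert!(!m8::new(false).test());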


Implementors

impl Mask for m128[src]

impl Mask for m16[src]

impl Mask for m32[src]

impl Mask for m64[src]

impl Mask for m8[src]

impl Mask for msize[src]

\ No newline at end of file diff --git a/packed_simd/trait.SimdArray.html b/packed_simd/trait.SimdArray.html new file mode 100644 index 000000000..643ebf99b --- /dev/null +++ b/packed_simd/trait.SimdArray.html @@ -0,0 +1,15 @@ +packed_simd::SimdArray - Rust

[][src]Trait packed_simd::SimdArray

pub trait SimdArray: Seal {
+    type Tuple: Copy + Clone;
+    type T;
+    type NT;
+
+    const N: usize;
+}

Trait implemented by arrays that can be SIMD types.

+
+

Associated Types

type Tuple: Copy + Clone

The corresponding #[repr(simd)] tuple type.

+

type T

The element type of the vector.

+

type NT

The type: [u32; Self::N].

+

Associated Constants

const N: usize

The number of elements in the array.

+
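Since the trait is sealed, downstream code can only consume it as a bound. A hypothetical helper that reads the lane count N:

use packed_simd::SimdArray;

// generic over any array that can back a SIMD vector
fn lanes_of<A: SimdArray>() -> usize {
    A::N
}

assert_eq!(lanes_of::<[f32; 4]>(), 4);
assert_eq!(lanes_of::<[u8; 64]>(), 64);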

Implementors

impl SimdArray for [m128; 1][src]

type Tuple = m128x1

type T = m128

type NT = [u32; 1]

impl SimdArray for [m128; 2][src]

type Tuple = m128x2

type T = m128

type NT = [u32; 2]

impl SimdArray for [m128; 4][src]

type Tuple = m128x4

type T = m128

type NT = [u32; 4]

impl SimdArray for [m16; 2][src]

type Tuple = m16x2

type T = m16

type NT = [u32; 2]

impl SimdArray for [m16; 4][src]

type Tuple = m16x4

type T = m16

type NT = [u32; 4]

impl SimdArray for [m16; 8][src]

type Tuple = m16x8

type T = m16

type NT = [u32; 8]

impl SimdArray for [m16; 16][src]

type Tuple = m16x16

type T = m16

type NT = [u32; 16]

impl SimdArray for [m16; 32][src]

type Tuple = m16x32

type T = m16

type NT = [u32; 32]

impl SimdArray for [m32; 2][src]

type Tuple = m32x2

type T = m32

type NT = [u32; 2]

impl SimdArray for [m32; 4][src]

type Tuple = m32x4

type T = m32

type NT = [u32; 4]

impl SimdArray for [m32; 8][src]

type Tuple = m32x8

type T = m32

type NT = [u32; 8]

impl SimdArray for [m32; 16][src]

type Tuple = m32x16

type T = m32

type NT = [u32; 16]

impl SimdArray for [m64; 1][src]

type Tuple = m64x1

type T = m64

type NT = [u32; 1]

impl SimdArray for [m64; 2][src]

type Tuple = m64x2

type T = m64

type NT = [u32; 2]

impl SimdArray for [m64; 4][src]

type Tuple = m64x4

type T = m64

type NT = [u32; 4]

impl SimdArray for [m64; 8][src]

type Tuple = m64x8

type T = m64

type NT = [u32; 8]

impl SimdArray for [m8; 2][src]

type Tuple = m8x2

type T = m8

type NT = [u32; 2]

impl SimdArray for [m8; 4][src]

type Tuple = m8x4

type T = m8

type NT = [u32; 4]

impl SimdArray for [m8; 8][src]

type Tuple = m8x8

type T = m8

type NT = [u32; 8]

impl SimdArray for [m8; 16][src]

type Tuple = m8x16

type T = m8

type NT = [u32; 16]

impl SimdArray for [m8; 32][src]

type Tuple = m8x32

type T = m8

type NT = [u32; 32]

impl SimdArray for [m8; 64][src]

type Tuple = m8x64

type T = m8

type NT = [u32; 64]

impl SimdArray for [msize; 2][src]

type Tuple = msizex2

type T = msize

type NT = [u32; 2]

impl SimdArray for [msize; 4][src]

type Tuple = msizex4

type T = msize

type NT = [u32; 4]

impl SimdArray for [msize; 8][src]

type Tuple = msizex8

type T = msize

type NT = [u32; 8]

impl SimdArray for [f32; 2][src]

type Tuple = f32x2

type T = f32

type NT = [u32; 2]

impl SimdArray for [f32; 4][src]

type Tuple = f32x4

type T = f32

type NT = [u32; 4]

impl SimdArray for [f32; 8][src]

type Tuple = f32x8

type T = f32

type NT = [u32; 8]

impl SimdArray for [f32; 16][src]

type Tuple = f32x16

type T = f32

type NT = [u32; 16]

impl SimdArray for [f64; 1][src]

type Tuple = f64x1

type T = f64

type NT = [u32; 1]

impl SimdArray for [f64; 2][src]

type Tuple = f64x2

type T = f64

type NT = [u32; 2]

impl SimdArray for [f64; 4][src]

type Tuple = f64x4

type T = f64

type NT = [u32; 4]

impl SimdArray for [f64; 8][src]

type Tuple = f64x8

type T = f64

type NT = [u32; 8]

impl SimdArray for [i128; 1][src]

type Tuple = i128x1

type T = i128

type NT = [u32; 1]

impl SimdArray for [i128; 2][src]

type Tuple = i128x2

type T = i128

type NT = [u32; 2]

impl SimdArray for [i128; 4][src]

type Tuple = i128x4

type T = i128

type NT = [u32; 4]

impl SimdArray for [i16; 2][src]

type Tuple = i16x2

type T = i16

type NT = [u32; 2]

impl SimdArray for [i16; 4][src]

type Tuple = i16x4

type T = i16

type NT = [u32; 4]

impl SimdArray for [i16; 8][src]

type Tuple = i16x8

type T = i16

type NT = [u32; 8]

impl SimdArray for [i16; 16][src]

type Tuple = i16x16

type T = i16

type NT = [u32; 16]

impl SimdArray for [i16; 32][src]

type Tuple = i16x32

type T = i16

type NT = [u32; 32]

impl SimdArray for [i32; 2][src]

type Tuple = i32x2

type T = i32

type NT = [u32; 2]

impl SimdArray for [i32; 4][src]

type Tuple = i32x4

type T = i32

type NT = [u32; 4]

impl SimdArray for [i32; 8][src]

type Tuple = i32x8

type T = i32

type NT = [u32; 8]

impl SimdArray for [i32; 16][src]

type Tuple = i32x16

type T = i32

type NT = [u32; 16]

impl SimdArray for [i64; 1][src]

type Tuple = i64x1

type T = i64

type NT = [u32; 1]

impl SimdArray for [i64; 2][src]

type Tuple = i64x2

type T = i64

type NT = [u32; 2]

impl SimdArray for [i64; 4][src]

type Tuple = i64x4

type T = i64

type NT = [u32; 4]

impl SimdArray for [i64; 8][src]

type Tuple = i64x8

type T = i64

type NT = [u32; 8]

impl SimdArray for [i8; 2][src]

type Tuple = i8x2

type T = i8

type NT = [u32; 2]

impl SimdArray for [i8; 4][src]

type Tuple = i8x4

type T = i8

type NT = [u32; 4]

impl SimdArray for [i8; 8][src]

type Tuple = i8x8

type T = i8

type NT = [u32; 8]

impl SimdArray for [i8; 16][src]

type Tuple = i8x16

type T = i8

type NT = [u32; 16]

impl SimdArray for [i8; 32][src]

type Tuple = i8x32

type T = i8

type NT = [u32; 32]

impl SimdArray for [i8; 64][src]

type Tuple = i8x64

type T = i8

type NT = [u32; 64]

impl SimdArray for [isize; 2][src]

type Tuple = isizex2

type T = isize

type NT = [u32; 2]

impl SimdArray for [isize; 4][src]

type Tuple = isizex4

type T = isize

type NT = [u32; 4]

impl SimdArray for [isize; 8][src]

type Tuple = isizex8

type T = isize

type NT = [u32; 8]

impl SimdArray for [u128; 1][src]

type Tuple = u128x1

type T = u128

type NT = [u32; 1]

impl SimdArray for [u128; 2][src]

type Tuple = u128x2

type T = u128

type NT = [u32; 2]

impl SimdArray for [u128; 4][src]

type Tuple = u128x4

type T = u128

type NT = [u32; 4]

impl SimdArray for [u16; 2][src]

type Tuple = u16x2

type T = u16

type NT = [u32; 2]

impl SimdArray for [u16; 4][src]

type Tuple = u16x4

type T = u16

type NT = [u32; 4]

impl SimdArray for [u16; 8][src]

type Tuple = u16x8

type T = u16

type NT = [u32; 8]

impl SimdArray for [u16; 16][src]

type Tuple = u16x16

type T = u16

type NT = [u32; 16]

impl SimdArray for [u16; 32][src]

type Tuple = u16x32

type T = u16

type NT = [u32; 32]

impl SimdArray for [u32; 2][src]

type Tuple = u32x2

type T = u32

type NT = [u32; 2]

impl SimdArray for [u32; 4][src]

type Tuple = u32x4

type T = u32

type NT = [u32; 4]

impl SimdArray for [u32; 8][src]

type Tuple = u32x8

type T = u32

type NT = [u32; 8]

impl SimdArray for [u32; 16][src]

type Tuple = u32x16

type T = u32

type NT = [u32; 16]

impl SimdArray for [u64; 1][src]

type Tuple = u64x1

type T = u64

type NT = [u32; 1]

impl SimdArray for [u64; 2][src]

type Tuple = u64x2

type T = u64

type NT = [u32; 2]

impl SimdArray for [u64; 4][src]

type Tuple = u64x4

type T = u64

type NT = [u32; 4]

impl SimdArray for [u64; 8][src]

type Tuple = u64x8

type T = u64

type NT = [u32; 8]

impl SimdArray for [u8; 2][src]

type Tuple = u8x2

type T = u8

type NT = [u32; 2]

impl SimdArray for [u8; 4][src]

type Tuple = u8x4

type T = u8

type NT = [u32; 4]

impl SimdArray for [u8; 8][src]

type Tuple = u8x8

type T = u8

type NT = [u32; 8]

impl SimdArray for [u8; 16][src]

type Tuple = u8x16

type T = u8

type NT = [u32; 16]

impl SimdArray for [u8; 32][src]

type Tuple = u8x32

type T = u8

type NT = [u32; 32]

impl SimdArray for [u8; 64][src]

type Tuple = u8x64

type T = u8

type NT = [u32; 64]

impl SimdArray for [usize; 2][src]

type Tuple = usizex2

type T = usize

type NT = [u32; 2]

impl SimdArray for [usize; 4][src]

type Tuple = usizex4

type T = usize

type NT = [u32; 4]

impl SimdArray for [usize; 8][src]

type Tuple = usizex8

type T = usize

type NT = [u32; 8]

impl<T> SimdArray for [*const T; 2][src]

type Tuple = cptrx2<*const T>

type T = *const T

type NT = [u32; 2]

impl<T> SimdArray for [*const T; 4][src]

type Tuple = cptrx4<*const T>

type T = *const T

type NT = [u32; 4]

impl<T> SimdArray for [*const T; 8][src]

type Tuple = cptrx8<*const T>

type T = *const T

type NT = [u32; 8]

impl<T> SimdArray for [*mut T; 2][src]

type Tuple = mptrx2<*mut T>

type T = *mut T

type NT = [u32; 2]

impl<T> SimdArray for [*mut T; 4][src]

type Tuple = mptrx4<*mut T>

type T = *mut T

type NT = [u32; 4]

impl<T> SimdArray for [*mut T; 8][src]

type Tuple = mptrx8<*mut T>

type T = *mut T

type NT = [u32; 8]

\ No newline at end of file diff --git a/packed_simd/trait.SimdVector.html b/packed_simd/trait.SimdVector.html new file mode 100644 index 000000000..1d9af5beb --- /dev/null +++ b/packed_simd/trait.SimdVector.html @@ -0,0 +1,13 @@ +packed_simd::SimdVector - Rust

[][src]Trait packed_simd::SimdVector

pub trait SimdVector: Seal {
+    type Element;
+    type LanesType;
+
+    const LANES: usize;
+}

This trait is implemented by all SIMD vector types.

+
+

Associated Types

type Element

Element type of the SIMD vector

+

type LanesType

The type: [u32; Self::LANES].

+

Associated Constants

const LANES: usize

The number of elements in the SIMD vector.

+
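A minimal sketch, assuming the trait is importable at the path this page suggests (packed_simd::SimdVector):

use packed_simd::{f32x4, SimdVector};

// the associated const mirrors the inherent `lanes()` method
assert_eq!(<f32x4 as SimdVector>::LANES, 4);
assert_eq!(f32x4::lanes(), 4);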

Implementors

impl Simd for f32x16[src]

type Element = f32

type LanesType = [u32; 16]

impl Simd for f32x2[src]

type Element = f32

type LanesType = [u32; 2]

impl Simd for f32x4[src]

type Element = f32

type LanesType = [u32; 4]

impl Simd for f32x8[src]

type Element = f32

type LanesType = [u32; 8]

impl Simd for f64x2[src]

type Element = f64

type LanesType = [u32; 2]

impl Simd for f64x4[src]

type Element = f64

type LanesType = [u32; 4]

impl Simd for f64x8[src]

type Element = f64

type LanesType = [u32; 8]

impl Simd for i128x1[src]

type Element = i128

type LanesType = [u32; 1]

impl Simd for i128x2[src]

type Element = i128

type LanesType = [u32; 2]

impl Simd for i128x4[src]

type Element = i128

type LanesType = [u32; 4]

impl Simd for i16x16[src]

type Element = i16

type LanesType = [u32; 16]

impl Simd for i16x2[src]

type Element = i16

type LanesType = [u32; 2]

impl Simd for i16x32[src]

type Element = i16

type LanesType = [u32; 32]

impl Simd for i16x4[src]

type Element = i16

type LanesType = [u32; 4]

impl Simd for i16x8[src]

type Element = i16

type LanesType = [u32; 8]

impl Simd for i32x16[src]

type Element = i32

type LanesType = [u32; 16]

impl Simd for i32x2[src]

type Element = i32

type LanesType = [u32; 2]

impl Simd for i32x4[src]

type Element = i32

type LanesType = [u32; 4]

impl Simd for i32x8[src]

type Element = i32

type LanesType = [u32; 8]

impl Simd for i64x2[src]

type Element = i64

type LanesType = [u32; 2]

impl Simd for i64x4[src]

type Element = i64

type LanesType = [u32; 4]

impl Simd for i64x8[src]

type Element = i64

type LanesType = [u32; 8]

impl Simd for i8x16[src]

type Element = i8

type LanesType = [u32; 16]

impl Simd for i8x2[src]

type Element = i8

type LanesType = [u32; 2]

impl Simd for i8x32[src]

type Element = i8

type LanesType = [u32; 32]

impl Simd for i8x4[src]

type Element = i8

type LanesType = [u32; 4]

impl Simd for i8x64[src]

type Element = i8

type LanesType = [u32; 64]

impl Simd for i8x8[src]

type Element = i8

type LanesType = [u32; 8]

impl Simd for isizex2[src]

type Element = isize

type LanesType = [u32; 2]

impl Simd for isizex4[src]

type Element = isize

type LanesType = [u32; 4]

impl Simd for isizex8[src]

type Element = isize

type LanesType = [u32; 8]

impl Simd for m128x1[src]

type Element = m128

type LanesType = [u32; 1]

impl Simd for m128x2[src]

type Element = m128

type LanesType = [u32; 2]

impl Simd for m128x4[src]

type Element = m128

type LanesType = [u32; 4]

impl Simd for m16x16[src]

type Element = m16

type LanesType = [u32; 16]

impl Simd for m16x2[src]

type Element = m16

type LanesType = [u32; 2]

impl Simd for m16x32[src]

type Element = m16

type LanesType = [u32; 32]

impl Simd for m16x4[src]

type Element = m16

type LanesType = [u32; 4]

impl Simd for m16x8[src]

type Element = m16

type LanesType = [u32; 8]

impl Simd for m32x16[src]

type Element = m32

type LanesType = [u32; 16]

impl Simd for m32x2[src]

type Element = m32

type LanesType = [u32; 2]

impl Simd for m32x4[src]

type Element = m32

type LanesType = [u32; 4]

impl Simd for m32x8[src]

type Element = m32

type LanesType = [u32; 8]

impl Simd for m64x2[src]

type Element = m64

type LanesType = [u32; 2]

impl Simd for m64x4[src]

type Element = m64

type LanesType = [u32; 4]

impl Simd for m64x8[src]

type Element = m64

type LanesType = [u32; 8]

impl Simd for m8x16[src]

type Element = m8

type LanesType = [u32; 16]

impl Simd for m8x2[src]

type Element = m8

type LanesType = [u32; 2]

impl Simd for m8x32[src]

type Element = m8

type LanesType = [u32; 32]

impl Simd for m8x4[src]

type Element = m8

type LanesType = [u32; 4]

impl Simd for m8x64[src]

type Element = m8

type LanesType = [u32; 64]

impl Simd for m8x8[src]

type Element = m8

type LanesType = [u32; 8]

impl Simd for msizex2[src]

type Element = msize

type LanesType = [u32; 2]

impl Simd for msizex4[src]

type Element = msize

type LanesType = [u32; 4]

impl Simd for msizex8[src]

type Element = msize

type LanesType = [u32; 8]

impl Simd for u128x1[src]

type Element = u128

type LanesType = [u32; 1]

impl Simd for u128x2[src]

type Element = u128

type LanesType = [u32; 2]

impl Simd for u128x4[src]

type Element = u128

type LanesType = [u32; 4]

impl Simd for u16x16[src]

type Element = u16

type LanesType = [u32; 16]

impl Simd for u16x2[src]

type Element = u16

type LanesType = [u32; 2]

impl Simd for u16x32[src]

type Element = u16

type LanesType = [u32; 32]

impl Simd for u16x4[src]

type Element = u16

type LanesType = [u32; 4]

impl Simd for u16x8[src]

type Element = u16

type LanesType = [u32; 8]

impl Simd for u32x16[src]

type Element = u32

type LanesType = [u32; 16]

impl Simd for u32x2[src]

type Element = u32

type LanesType = [u32; 2]

impl Simd for u32x4[src]

type Element = u32

type LanesType = [u32; 4]

impl Simd for u32x8[src]

type Element = u32

type LanesType = [u32; 8]

impl Simd for u64x2[src]

type Element = u64

type LanesType = [u32; 2]

impl Simd for u64x4[src]

type Element = u64

type LanesType = [u32; 4]

impl Simd for u64x8[src]

type Element = u64

type LanesType = [u32; 8]

impl Simd for u8x16[src]

type Element = u8

type LanesType = [u32; 16]

impl Simd for u8x2[src]

type Element = u8

type LanesType = [u32; 2]

impl Simd for u8x32[src]

type Element = u8

type LanesType = [u32; 32]

impl Simd for u8x4[src]

type Element = u8

type LanesType = [u32; 4]

impl Simd for u8x64[src]

type Element = u8

type LanesType = [u32; 64]

impl Simd for u8x8[src]

type Element = u8

type LanesType = [u32; 8]

impl Simd for usizex2[src]

type Element = usize

type LanesType = [u32; 2]

impl Simd for usizex4[src]

type Element = usize

type LanesType = [u32; 4]

impl Simd for usizex8[src]

type Element = usize

type LanesType = [u32; 8]

impl<T> Simd for cptrx2<T>[src]

type Element = *const T

type LanesType = [u32; 2]

impl<T> Simd for cptrx4<T>[src]

type Element = *const T

type LanesType = [u32; 4]

impl<T> Simd for cptrx8<T>[src]

type Element = *const T

type LanesType = [u32; 8]

impl<T> Simd for mptrx2<T>[src]

type Element = *mut T

type LanesType = [u32; 2]

impl<T> Simd for mptrx4<T>[src]

type Element = *mut T

type LanesType = [u32; 4]

impl<T> Simd for mptrx8<T>[src]

type Element = *mut T

type LanesType = [u32; 8]

\ No newline at end of file diff --git a/packed_simd/type.cptrx2.html b/packed_simd/type.cptrx2.html new file mode 100644 index 000000000..a95b6d421 --- /dev/null +++ b/packed_simd/type.cptrx2.html @@ -0,0 +1,274 @@ +packed_simd::cptrx2 - Rust

[][src]Type Definition packed_simd::cptrx2

type cptrx2<T> = Simd<[*const T; 2]>;

A vector with 2 *const T lanes

+

Implementations

impl<T> cptrx2<T>[src]

pub const fn new(x0: *const T, x1: *const T) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: *const T) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to +null.

+

pub fn is_null(self) -> msizex2[src]

Returns a mask that selects those lanes that contain null +pointers.

+
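Taken together, a minimal sketch of constructing pointer vectors and testing for null lanes:

use packed_simd::cptrx2;

let x = 1i32;
// lane 0 points at `x`, lane 1 is null
let v = cptrx2::new(&x, core::ptr::null());
let m = v.is_null();
assert!(m.any() && !m.all());
// `null()` and `splat` cover the uniform cases
assert!(cptrx2::<i32>::null().is_null().all());
assert_eq!(cptrx2::<i32>::lanes(), 2);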

pub fn extract(self, index: usize) -> *const T[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> *const T[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+
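A short sketch of extract and replace; note that replace returns a new vector and leaves the original untouched:

use packed_simd::cptrx2;

let (a, b) = (1i32, 2i32);
let v = cptrx2::new(&a, &b);
assert_eq!(v.extract(0), &a as *const i32);
// `replace` builds a new vector; `v` itself is unchanged
let w = v.replace(1, core::ptr::null());
assert!(w.is_null().any());
assert!(!v.is_null().any());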

impl<T> cptrx2<T>[src]

pub fn eq(self, other: Self) -> msizex2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex2[src]

Lane-wise greater-than-or-equals comparison.

+
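A minimal sketch of the lane-wise comparisons, each of which returns an msizex2 mask:

use packed_simd::cptrx2;

let x = 0i32;
let p = cptrx2::splat(&x as *const i32);
assert!(p.eq(p).all());
assert!(!p.ne(p).any());
assert!(p.le(p).all() && p.ge(p).all());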

impl<T> cptrx2<T>[src]

pub fn from_slice_aligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> cptrx2<T>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
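A round-trip sketch through the unaligned slice APIs:

use packed_simd::cptrx2;

let xs = [10i32, 20];
let ptrs: [*const i32; 2] = [&xs[0], &xs[1]];
let v = cptrx2::from_slice_unaligned(&ptrs);
let mut out = [core::ptr::null::<i32>(); 2];
v.write_to_slice_unaligned(&mut out);
assert_eq!(out, ptrs);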

impl<T> cptrx2<T>[src]

pub unsafe fn offset(self, count: isizex2) -> Self[src]

Calculates the offset from a pointer.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum, in bytes, must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset(self, count: isizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .offset(count) instead when possible, because +offset allows the compiler to optimize better.

+
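A minimal sketch of lane-wise offsetting that keeps every resulting pointer in bounds, as offset requires:

use packed_simd::{cptrx2, isizex2};

let xs = [0i32, 1, 2, 3];
let base = cptrx2::splat(xs.as_ptr());
// advance lane 0 by one element and lane 1 by three
let p = unsafe { base.offset(isizex2::new(1, 3)) };
unsafe {
    assert_eq!(*p.extract(0), 1);
    assert_eq!(*p.extract(1), 3);
}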

pub unsafe fn offset_from(self, origin: Self) -> isizex2[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

This function is the inverse of offset.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and other pointer must be either in bounds or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow an isize.

  • The distance between the pointers, in bytes, must be an exact multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around" the address space.

The compiler and standard library generally try to ensure +allocations never reach a size where an offset is a concern. For +instance, Vec and Box ensure they never allocate more than +isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) +is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset_from instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset_from(self, origin: Self) -> isizex2[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

If the address difference between the two pointers is not a multiple of mem::size_of::<T>() then the result of the division is rounded towards zero.

+

Though this method is safe for any two pointers, note that its +result will be mostly useless if the two pointers aren't into +the same allocated object, for example if they point to two +different local variables.

+

pub unsafe fn add(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub unsafe fn sub(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_add(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .add(count) instead when possible, because add +allows the compiler to optimize better.

+

pub fn wrapping_sub(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .sub(count) instead when possible, because sub +allows the compiler to optimize better.

+

impl<T> cptrx2<T>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl<T> cptrx2<T> where
    [T; 2]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 2]>,
    value: Simd<[T; 2]>
) -> Simd<[T; 2]> where
    M: Mask,
    [M; 2]: SimdArray
[src]

Reads selected vector elements from memory.

+

Instantiates a new vector by reading the values from self for +those lanes whose mask is true, and using the elements of +value otherwise.

+

No memory is accessed for those lanes of self whose mask is +false.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The +pointers must be aligned to mem::align_of::<T>().

+
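A hedged sketch of a masked read; the null lane is masked off, so no memory is touched there:

use packed_simd::{cptrx2, i32x2, msizex2};

let x = 42i32;
// lane 1 holds a null pointer and must stay masked off
let ptrs = cptrx2::new(&x, core::ptr::null());
let mask = msizex2::new(true, false);
let fallback = i32x2::splat(-1);
let r = unsafe { ptrs.read(mask, fallback) };
assert_eq!(r, i32x2::new(42, -1));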

Trait Implementations

impl<T> Debug for cptrx2<T>[src]

impl<T> Default for cptrx2<T>[src]

impl<T> Eq for cptrx2<T>[src]

impl<T> From<[*const T; 2]> for cptrx2<T>[src]

impl<T> Hash for cptrx2<T>[src]

impl<T> Into<[*const T; 2]> for cptrx2<T>[src]

impl<T> PartialEq<Simd<[*const T; 2]>> for cptrx2<T>[src]

impl<T> Simd for cptrx2<T>[src]

type Element = *const T

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::LANES].

+
\ No newline at end of file diff --git a/packed_simd/type.cptrx4.html b/packed_simd/type.cptrx4.html new file mode 100644 index 000000000..5fa171caf --- /dev/null +++ b/packed_simd/type.cptrx4.html @@ -0,0 +1,274 @@ +packed_simd::cptrx4 - Rust

[][src]Type Definition packed_simd::cptrx4

type cptrx4<T> = Simd<[*const T; 4]>;

A vector with 4 *const T lanes

+

Implementations

impl<T> cptrx4<T>[src]

pub const fn new(x0: *const T, x1: *const T, x2: *const T, x3: *const T) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: *const T) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to +null.

+

pub fn is_null(self) -> msizex4[src]

Returns a mask that selects those lanes that contain null +pointers.

+

pub fn extract(self, index: usize) -> *const T[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> *const T[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl<T> cptrx4<T>[src]

pub fn eq(self, other: Self) -> msizex4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex4[src]

Lane-wise greater-than-or-equals comparison.

+

impl<T> cptrx4<T>[src]

pub fn from_slice_aligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> cptrx4<T>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> cptrx4<T>[src]

pub unsafe fn offset(self, count: isizex4) -> Self[src]

Calculates the offset from a pointer.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum, in bytes, must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset(self, count: isizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .offset(count) instead when possible, because +offset allows the compiler to optimize better.

+

pub unsafe fn offset_from(self, origin: Self) -> isizex4[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

This function is the inverse of offset.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and other pointer must be either in bounds or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow an isize.

  • The distance between the pointers, in bytes, must be an exact multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around" the address space.

The compiler and standard library generally try to ensure +allocations never reach a size where an offset is a concern. For +instance, Vec and Box ensure they never allocate more than +isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) +is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset_from instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset_from(self, origin: Self) -> isizex4[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

If the address difference between the two pointers is not a multiple of mem::size_of::<T>() then the result of the division is rounded towards zero.

+

Though this method is safe for any two pointers, note that its +result will be mostly useless if the two pointers aren't into +the same allocated object, for example if they point to two +different local variables.

+
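A minimal sketch of the safe, lane-wise distance computation:

use packed_simd::{cptrx4, isizex4};

let xs = [0u8; 16];
let a = cptrx4::splat(xs.as_ptr().wrapping_add(8));
let b = cptrx4::splat(xs.as_ptr());
// distance in units of T (u8 here), per lane
assert_eq!(a.wrapping_offset_from(b), isizex4::splat(8));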

pub unsafe fn add(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub unsafe fn sub(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_add(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .add(count) instead when possible, because add +allows the compiler to optimize better.

+

pub fn wrapping_sub(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .sub(count) instead when possible, because sub +allows the compiler to optimize better.

+

impl<T> cptrx4<T>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl<T> cptrx4<T> where
    [T; 4]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 4]>,
    value: Simd<[T; 4]>
) -> Simd<[T; 4]> where
    M: Mask,
    [M; 4]: SimdArray
[src]

Reads selected vector elements from memory.

+

Instantiates a new vector by reading the values from self for +those lanes whose mask is true, and using the elements of +value otherwise.

+

No memory is accessed for those lanes of self whose mask is +false.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The +pointers must be aligned to mem::align_of::<T>().

+
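
As a sketch of the masked gather (assuming packed_simd; the values and the all-zero fallback are illustrative), lanes whose pointer is null take the fallback and are never dereferenced:

use packed_simd::{cptrx4, i32x4};
use std::ptr;

fn main() {
    let (a, b) = (7_i32, 9_i32);
    let ptrs = cptrx4::<i32>::new(&a, ptr::null(), &b, ptr::null());
    // Gather only through the non-null lanes; use 0 elsewhere.
    let mask = !ptrs.is_null();
    let got = unsafe { ptrs.read(mask, i32x4::splat(0)) };
    assert_eq!(got, i32x4::new(7, 0, 9, 0));
}

+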

Trait Implementations

impl<T> Debug for cptrx4<T>[src]

impl<T> Default for cptrx4<T>[src]

impl<T> Eq for cptrx4<T>[src]

impl<T> From<[*const T; 4]> for cptrx4<T>[src]

impl<T> Hash for cptrx4<T>[src]

impl<T> Into<[*const T; 4]> for cptrx4<T>[src]

impl<T> PartialEq<Simd<[*const T; 4]>> for cptrx4<T>[src]

impl<T> Simd for cptrx4<T>[src]

type Element = *const T

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+
\ No newline at end of file diff --git a/packed_simd/type.cptrx8.html b/packed_simd/type.cptrx8.html new file mode 100644 index 000000000..b544f3270 --- /dev/null +++ b/packed_simd/type.cptrx8.html @@ -0,0 +1,274 @@ +packed_simd::cptrx8 - Rust

[][src]Type Definition packed_simd::cptrx8

type cptrx8<T> = Simd<[*const T; 8]>;

A vector with 8 *const T lanes

+

Implementations

impl<T> cptrx8<T>[src]

pub const fn new(
    x0: *const T,
    x1: *const T,
    x2: *const T,
    x3: *const T,
    x4: *const T,
    x5: *const T,
    x6: *const T,
    x7: *const T
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: *const T) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to +null.

+

pub fn is_null(self) -> msizex8[src]

Returns a mask that selects those lanes that contain null +pointers.

+

pub fn extract(self, index: usize) -> *const T[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> *const T[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: *const T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+
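
A small sketch of the constructor and accessors above (assuming packed_simd; the lane index and value are illustrative). Note that replace returns a new vector, which is why it is marked #[must_use]:

use packed_simd::cptrx8;

fn main() {
    let x = 1_i32;
    // All eight lanes start out null...
    let v = cptrx8::<i32>::null();
    // ...then lane 5 gets a real pointer.
    let v = v.replace(5, &x);
    assert!(v.extract(0).is_null());
    assert_eq!(v.extract(5), &x as *const i32);
}

+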

impl<T> cptrx8<T>[src]

pub fn eq(self, other: Self) -> msizex8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex8[src]

Lane-wise greater-than-or-equals comparison.

+
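
The comparisons above return an msizex8 mask, which supports per-lane extraction and boolean reductions. A sketch (assuming packed_simd; names and values are illustrative):

use packed_simd::cptrx8;

fn main() {
    let x = 0_u8;
    let nulls = cptrx8::<u8>::null();
    let mixed = nulls.replace(3, &x);
    // Lane 3 differs, so not all lanes compare equal.
    let m = nulls.eq(mixed);
    assert!(!m.all());
    assert!(m.extract(0));  // both null in lane 0
    assert!(!m.extract(3));
}

+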

impl<T> cptrx8<T>[src]

pub fn from_slice_aligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*const T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> cptrx8<T>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*const T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
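
A sketch of the unaligned slice round trip (assuming packed_simd; the slices are illustrative). Only slice.len() >= Self::lanes() is required here, whereas the _aligned variants also demand align_of::<Self>() alignment:

use packed_simd::cptrx8;

fn main() {
    let xs = [0_i32; 8];
    let src: Vec<*const i32> = xs.iter().map(|x| x as *const i32).collect();
    let v = cptrx8::from_slice_unaligned(&src);
    let mut dst = [std::ptr::null::<i32>(); 8];
    v.write_to_slice_unaligned(&mut dst);
    assert_eq!(&dst[..], &src[..]);
}

+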

impl<T> cptrx8<T>[src]

pub unsafe fn offset(self, count: isizex8) -> Self[src]

Calculates the offset from a pointer.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is +Undefined Behavior:

+

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum, in bytes, must fit in a usize.

+

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset(self, count: isizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .offset(count) instead when possible, because +offset allows the compiler to optimize better.

+

pub unsafe fn offset_from(self, origin: Self) -> isizex8[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

This function is the inverse of offset.

+

Safety

+

If any of the following conditions are violated, the result is +Undefined Behavior:

+

  • Both the starting and other pointer must be either in bounds or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow an isize.

  • The distance between the pointers, in bytes, must be an exact multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around" the address space.

+

The compiler and standard library generally try to ensure +allocations never reach a size where an offset is a concern. For +instance, Vec and Box ensure they never allocate more than +isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) +is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset_from instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset_from(self, origin: Self) -> isizex8[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

If the address difference between the two pointers is not a multiple of mem::size_of::<T>(), then the result of the division is rounded towards zero.

+

Though this method is safe for any two pointers, note that its +result will be mostly useless if the two pointers aren't into +the same allocated object, for example if they point to two +different local variables.

+
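
To make the unit-of-T convention concrete, a sketch (assuming packed_simd; the data and counts are illustrative) where offset_from recovers exactly the per-lane counts passed to add:

use packed_simd::{cptrx8, isizex8, usizex8};

fn main() {
    let data = [0_u64; 8];
    let base = cptrx8::splat(data.as_ptr());
    let counts = usizex8::new(0, 1, 2, 3, 4, 5, 6, 7);
    unsafe {
        let ptrs = base.add(counts);
        // Distances come back in units of u64, one per lane.
        let d = ptrs.offset_from(base);
        assert_eq!(d, isizex8::new(0, 1, 2, 3, 4, 5, 6, 7));
    }
}

+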

pub unsafe fn add(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is +Undefined Behavior:

+

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

+

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub unsafe fn sub(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is +Undefined Behavior:

+

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

+

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_add(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .add(count) instead when possible, because add +allows the compiler to optimize better.

+

pub fn wrapping_sub(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .sub(count) instead when possible, because sub +allows the compiler to optimize better.

+

impl<T> cptrx8<T>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl<T> cptrx8<T> where
    [T; 8]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 8]>,
    value: Simd<[T; 8]>
) -> Simd<[T; 8]> where
    M: Mask,
    [M; 8]: SimdArray
[src]

Reads selected vector elements from memory.

+

Instantiates a new vector by reading the values from self for +those lanes whose mask is true, and using the elements of +value otherwise.

+

No memory is accessed for those lanes of self whose mask is +false.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The +pointers must be aligned to mem::align_of::<T>().

+

Trait Implementations

impl<T> Debug for cptrx8<T>[src]

impl<T> Default for cptrx8<T>[src]

impl<T> Eq for cptrx8<T>[src]

impl<T> From<[*const T; 8]> for cptrx8<T>[src]

impl<T> Hash for cptrx8<T>[src]

impl<T> Into<[*const T; 8]> for cptrx8<T>[src]

impl<T> PartialEq<Simd<[*const T; 8]>> for cptrx8<T>[src]

impl<T> Simd for cptrx8<T>[src]

type Element = *const T

Element type of the SIMD vector

+

type LanesType = [u32; 8]

The type: [u32; Self::N].

+
\ No newline at end of file diff --git a/packed_simd/type.f32x16.html b/packed_simd/type.f32x16.html new file mode 100644 index 000000000..ea988a1a5 --- /dev/null +++ b/packed_simd/type.f32x16.html @@ -0,0 +1,204 @@ +packed_simd::f32x16 - Rust

[][src]Type Definition packed_simd::f32x16

type f32x16 = Simd<[f32; 16]>;

A 512-bit vector with 16 f32 lanes.

+

Implementations

impl f32x16[src]

pub const fn new(
    x0: f32,
    x1: f32,
    x2: f32,
    x3: f32,
    x4: f32,
    x5: f32,
    x6: f32,
    x7: f32,
    x8: f32,
    x9: f32,
    x10: f32,
    x11: f32,
    x12: f32,
    x13: f32,
    x14: f32,
    x15: f32
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: f32) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> f32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> f32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl f32x16[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl f32x16[src]

pub fn sum(self) -> f32[src]

Horizontal sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

+

pub fn product(self) -> f32[src]

Horizontal product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

+
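
A sketch of the reductions (assuming packed_simd; the lane values are illustrative). These inputs are exactly representable, so the tree order cannot change the result here; with general floats the tree reduction may differ from a left-to-right fold in the last bits:

use packed_simd::f32x16;

fn main() {
    let v = f32x16::splat(1.0).replace(0, 5.0);
    assert_eq!(v.sum(), 20.0);     // 5 + 15 * 1
    assert_eq!(v.product(), 5.0);  // 5 * 1^15
}

+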

impl f32x16[src]

pub fn max_element(self) -> f32[src]

Largest vector element value.

+

pub fn min_element(self) -> f32[src]

Smallest vector element value.

+

impl f32x16[src]

pub fn from_slice_aligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl f32x16[src]

pub fn write_to_slice_aligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl f32x16[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl f32x16[src]

pub const EPSILON: f32x16[src]

Machine epsilon value.

+

pub const MIN: f32x16[src]

Smallest finite value.

+

pub const MIN_POSITIVE: f32x16[src]

Smallest positive normal value.

+

pub const MAX: f32x16[src]

Largest finite value.

+

pub const NAN: f32x16[src]

Not a Number (NaN).

+

pub const INFINITY: f32x16[src]

Infinity (∞).

+

pub const NEG_INFINITY: f32x16[src]

Negative infinity (-∞).

+

pub const PI: f32x16[src]

Archimedes' constant (π)

+

pub const FRAC_PI_2: f32x16[src]

π/2

+

pub const FRAC_PI_3: f32x16[src]

π/3

+

pub const FRAC_PI_4: f32x16[src]

π/4

+

pub const FRAC_PI_6: f32x16[src]

π/6

+

pub const FRAC_PI_8: f32x16[src]

π/8

+

pub const FRAC_1_PI: f32x16[src]

1/π

+

pub const FRAC_2_PI: f32x16[src]

2/π

+

pub const FRAC_2_SQRT_PI: f32x16[src]

2/sqrt(π)

+

pub const SQRT_2: f32x16[src]

sqrt(2)

+

pub const FRAC_1_SQRT_2: f32x16[src]

1/sqrt(2)

+

pub const E: f32x16[src]

Euler's number (e)

+

pub const LOG2_E: f32x16[src]

log2(e)

+

pub const LOG10_E: f32x16[src]

log10(e)

+

pub const LN_2: f32x16[src]

ln(2)

+

pub const LN_10: f32x16[src]

ln(10)

+

impl f32x16[src]

pub fn is_nan(self) -> m32x16[src]

pub fn is_infinite(self) -> m32x16[src]

pub fn is_finite(self) -> m32x16[src]

impl f32x16[src]

pub fn abs(self) -> Self[src]

Absolute value.

+

impl f32x16[src]

pub fn cos(self) -> Self[src]

Cosine.

+

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

+

impl f32x16[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

+

impl f32x16[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

+

impl f32x16[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

+

impl f32x16[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

+

While fused multiply-add (fma) has infinite precision, mul_adde has at worst the same precision as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.

+
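
A sketch contrasting the fused and unfused forms (assuming packed_simd; the operands are illustrative). mul_add rounds once while the explicit multiply-then-add rounds twice; these inputs are exact, so both agree:

use packed_simd::f32x16;

fn main() {
    let x = f32x16::splat(2.0);
    let y = f32x16::splat(3.0);
    let z = f32x16::splat(1.0);
    let fused = x.mul_add(y, z);  // 2 * 3 + 1, rounded once
    let split = x * y + z;        // rounded after each operation
    assert_eq!(fused, split);     // exact here; not guaranteed in general
}

+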

impl f32x16[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating-point power of x.

+

impl f32x16[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f32x16[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f32x16[src]

pub fn sin(self) -> Self[src]

Sine.

+

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

+

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

+

impl f32x16[src]

pub fn sqrt(self) -> Self[src]

impl f32x16[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

+

FIXME: The precision of the estimate is currently unspecified.

+
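
Since the precision of the estimate is unspecified, compare it against the exact sqrt with a tolerance rather than for equality. A sketch (assuming packed_simd; the tolerance 1e-2 is an illustrative loose bound, not a documented guarantee):

use packed_simd::f32x16;

fn main() {
    let v = f32x16::splat(4.0);
    let exact = v.sqrt();    // 2.0 in every lane
    let approx = v.sqrte();  // platform-dependent accuracy
    let err = (exact - approx).abs();
    assert!(err.lt(f32x16::splat(1e-2)).all());
}

+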

impl f32x16[src]

pub fn tanh(self) -> Self[src]

Tanh.

+

impl f32x16[src]

pub fn eq(self, other: Self) -> m32x16[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x16[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x16[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x16[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x16[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x16[src]

Lane-wise greater-than-or-equals comparison.

+

Trait Implementations

impl Add<Simd<[f32; 16]>> for f32x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<f32> for f32x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[f32; 16]>> for f32x16[src]

impl AddAssign<f32> for f32x16[src]

impl Debug for f32x16[src]

impl Default for f32x16[src]

impl Div<Simd<[f32; 16]>> for f32x16[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<f32> for f32x16[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[f32; 16]>> for f32x16[src]

impl DivAssign<f32> for f32x16[src]

impl From<[f32; 16]> for f32x16[src]

impl From<Simd<[i16; 16]>> for f32x16[src]

impl From<Simd<[i8; 16]>> for f32x16[src]

impl From<Simd<[u16; 16]>> for f32x16[src]

impl From<Simd<[u8; 16]>> for f32x16[src]

impl FromBits<Simd<[f64; 8]>> for f32x16[src]

impl FromBits<Simd<[i128; 4]>> for f32x16[src]

impl FromBits<Simd<[i16; 32]>> for f32x16[src]

impl FromBits<Simd<[i32; 16]>> for f32x16[src]

impl FromBits<Simd<[i64; 8]>> for f32x16[src]

impl FromBits<Simd<[i8; 64]>> for f32x16[src]

impl FromBits<Simd<[m128; 4]>> for f32x16[src]

impl FromBits<Simd<[m16; 32]>> for f32x16[src]

impl FromBits<Simd<[m32; 16]>> for f32x16[src]

impl FromBits<Simd<[m64; 8]>> for f32x16[src]

impl FromBits<Simd<[m8; 64]>> for f32x16[src]

impl FromBits<Simd<[u128; 4]>> for f32x16[src]

impl FromBits<Simd<[u16; 32]>> for f32x16[src]

impl FromBits<Simd<[u32; 16]>> for f32x16[src]

impl FromBits<Simd<[u64; 8]>> for f32x16[src]

impl FromBits<Simd<[u8; 64]>> for f32x16[src]

impl FromCast<Simd<[i16; 16]>> for f32x16[src]

impl FromCast<Simd<[i32; 16]>> for f32x16[src]

impl FromCast<Simd<[i8; 16]>> for f32x16[src]

impl FromCast<Simd<[m16; 16]>> for f32x16[src]

impl FromCast<Simd<[m32; 16]>> for f32x16[src]

impl FromCast<Simd<[m8; 16]>> for f32x16[src]

impl FromCast<Simd<[u16; 16]>> for f32x16[src]

impl FromCast<Simd<[u32; 16]>> for f32x16[src]

impl FromCast<Simd<[u8; 16]>> for f32x16[src]

impl Mul<Simd<[f32; 16]>> for f32x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<f32> for f32x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[f32; 16]>> for f32x16[src]

impl MulAssign<f32> for f32x16[src]

impl Neg for f32x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl PartialEq<Simd<[f32; 16]>> for f32x16[src]

impl<'a> Product<&'a Simd<[f32; 16]>> for f32x16[src]

impl Product<Simd<[f32; 16]>> for f32x16[src]

impl Rem<Simd<[f32; 16]>> for f32x16[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<f32> for f32x16[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[f32; 16]>> for f32x16[src]

impl RemAssign<f32> for f32x16[src]

impl Simd for f32x16[src]

type Element = f32

Element type of the SIMD vector

+

type LanesType = [u32; 16]

The type: [u32; Self::N].

+

impl Sub<Simd<[f32; 16]>> for f32x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<f32> for f32x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[f32; 16]>> for f32x16[src]

impl SubAssign<f32> for f32x16[src]

impl<'a> Sum<&'a Simd<[f32; 16]>> for f32x16[src]

impl Sum<Simd<[f32; 16]>> for f32x16[src]

\ No newline at end of file diff --git a/packed_simd/type.f32x2.html b/packed_simd/type.f32x2.html new file mode 100644 index 000000000..ff7656a55 --- /dev/null +++ b/packed_simd/type.f32x2.html @@ -0,0 +1,208 @@ +packed_simd::f32x2 - Rust

[][src]Type Definition packed_simd::f32x2

type f32x2 = Simd<[f32; 2]>;

A 64-bit vector with 2 f32 lanes.

+

Implementations

impl f32x2[src]

pub const fn new(x0: f32, x1: f32) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: f32) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> f32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> f32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl f32x2[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl f32x2[src]

pub fn sum(self) -> f32[src]

Horizontal sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

+

pub fn product(self) -> f32[src]

Horizontal product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

+

impl f32x2[src]

pub fn max_element(self) -> f32[src]

Largest vector element value.

+

pub fn min_element(self) -> f32[src]

Smallest vector element value.

+

impl f32x2[src]

pub fn from_slice_aligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl f32x2[src]

pub fn write_to_slice_aligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl f32x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl f32x2[src]

pub const EPSILON: f32x2[src]

Machine epsilon value.

+

pub const MIN: f32x2[src]

Smallest finite value.

+

pub const MIN_POSITIVE: f32x2[src]

Smallest positive normal value.

+

pub const MAX: f32x2[src]

Largest finite value.

+

pub const NAN: f32x2[src]

Not a Number (NaN).

+

pub const INFINITY: f32x2[src]

Infinity (∞).

+

pub const NEG_INFINITY: f32x2[src]

Negative infinity (-∞).

+

pub const PI: f32x2[src]

Archimedes' constant (π)

+

pub const FRAC_PI_2: f32x2[src]

π/2

+

pub const FRAC_PI_3: f32x2[src]

π/3

+

pub const FRAC_PI_4: f32x2[src]

π/4

+

pub const FRAC_PI_6: f32x2[src]

π/6

+

pub const FRAC_PI_8: f32x2[src]

π/8

+

pub const FRAC_1_PI: f32x2[src]

1/π

+

pub const FRAC_2_PI: f32x2[src]

2/π

+

pub const FRAC_2_SQRT_PI: f32x2[src]

2/sqrt(π)

+

pub const SQRT_2: f32x2[src]

sqrt(2)

+

pub const FRAC_1_SQRT_2: f32x2[src]

1/sqrt(2)

+

pub const E: f32x2[src]

Euler's number (e)

+

pub const LOG2_E: f32x2[src]

log2(e)

+

pub const LOG10_E: f32x2[src]

log10(e)

+

pub const LN_2: f32x2[src]

ln(2)

+

pub const LN_10: f32x2[src]

ln(10)

+

impl f32x2[src]

pub fn is_nan(self) -> m32x2[src]

pub fn is_infinite(self) -> m32x2[src]

pub fn is_finite(self) -> m32x2[src]

impl f32x2[src]

pub fn abs(self) -> Self[src]

Absolute value.

+

impl f32x2[src]

pub fn cos(self) -> Self[src]

Cosine.

+

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

+

impl f32x2[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

+

impl f32x2[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

+

impl f32x2[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

+

impl f32x2[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

+

While fused multiply-add (fma) has infinite precision, mul_adde has at worst the same precision as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.

+

impl f32x2[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating-point power of x.

+

impl f32x2[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f32x2[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f32x2[src]

pub fn sin(self) -> Self[src]

Sine.

+

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

+

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

+

impl f32x2[src]

pub fn sqrt(self) -> Self[src]

impl f32x2[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f32x2[src]

pub fn tanh(self) -> Self[src]

Tanh.

+

impl f32x2[src]

pub fn eq(self, other: Self) -> m32x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x2[src]

Lane-wise greater-than-or-equals comparison.

+
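
The m32x2 masks returned by these comparisons can drive branch-free selection. A sketch (assuming packed_simd and its mask select method; the clamp is illustrative):

use packed_simd::f32x2;

fn main() {
    let v = f32x2::new(-1.5, 2.5);
    let zero = f32x2::splat(0.0);
    // true where the lane is negative
    let neg = v.lt(zero);
    let clamped = neg.select(zero, v);
    assert_eq!(clamped, f32x2::new(0.0, 2.5));
}

+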

Trait Implementations

impl Add<Simd<[f32; 2]>> for f32x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<f32> for f32x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[f32; 2]>> for f32x2[src]

impl AddAssign<f32> for f32x2[src]

impl Debug for f32x2[src]

impl Default for f32x2[src]

impl Div<Simd<[f32; 2]>> for f32x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<f32> for f32x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[f32; 2]>> for f32x2[src]

impl DivAssign<f32> for f32x2[src]

impl From<[f32; 2]> for f32x2[src]

impl From<Simd<[i16; 2]>> for f32x2[src]

impl From<Simd<[i8; 2]>> for f32x2[src]

impl From<Simd<[u16; 2]>> for f32x2[src]

impl From<Simd<[u8; 2]>> for f32x2[src]

impl FromBits<Simd<[i16; 4]>> for f32x2[src]

impl FromBits<Simd<[i32; 2]>> for f32x2[src]

impl FromBits<Simd<[i8; 8]>> for f32x2[src]

impl FromBits<Simd<[m16; 4]>> for f32x2[src]

impl FromBits<Simd<[m32; 2]>> for f32x2[src]

impl FromBits<Simd<[m8; 8]>> for f32x2[src]

impl FromBits<Simd<[u16; 4]>> for f32x2[src]

impl FromBits<Simd<[u32; 2]>> for f32x2[src]

impl FromBits<Simd<[u8; 8]>> for f32x2[src]

impl FromBits<__m64> for f32x2[src]

impl FromCast<Simd<[f64; 2]>> for f32x2[src]

impl FromCast<Simd<[i128; 2]>> for f32x2[src]

impl FromCast<Simd<[i16; 2]>> for f32x2[src]

impl FromCast<Simd<[i32; 2]>> for f32x2[src]

impl FromCast<Simd<[i64; 2]>> for f32x2[src]

impl FromCast<Simd<[i8; 2]>> for f32x2[src]

impl FromCast<Simd<[isize; 2]>> for f32x2[src]

impl FromCast<Simd<[m128; 2]>> for f32x2[src]

impl FromCast<Simd<[m16; 2]>> for f32x2[src]

impl FromCast<Simd<[m32; 2]>> for f32x2[src]

impl FromCast<Simd<[m64; 2]>> for f32x2[src]

impl FromCast<Simd<[m8; 2]>> for f32x2[src]

impl FromCast<Simd<[msize; 2]>> for f32x2[src]

impl FromCast<Simd<[u128; 2]>> for f32x2[src]

impl FromCast<Simd<[u16; 2]>> for f32x2[src]

impl FromCast<Simd<[u32; 2]>> for f32x2[src]

impl FromCast<Simd<[u64; 2]>> for f32x2[src]

impl FromCast<Simd<[u8; 2]>> for f32x2[src]

impl FromCast<Simd<[usize; 2]>> for f32x2[src]

impl Mul<Simd<[f32; 2]>> for f32x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<f32> for f32x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[f32; 2]>> for f32x2[src]

impl MulAssign<f32> for f32x2[src]

impl Neg for f32x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl PartialEq<Simd<[f32; 2]>> for f32x2[src]

impl<'a> Product<&'a Simd<[f32; 2]>> for f32x2[src]

impl Product<Simd<[f32; 2]>> for f32x2[src]

impl Rem<Simd<[f32; 2]>> for f32x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<f32> for f32x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[f32; 2]>> for f32x2[src]

impl RemAssign<f32> for f32x2[src]

impl Simd for f32x2[src]

type Element = f32

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+

impl Sub<Simd<[f32; 2]>> for f32x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<f32> for f32x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[f32; 2]>> for f32x2[src]

impl SubAssign<f32> for f32x2[src]

impl<'a> Sum<&'a Simd<[f32; 2]>> for f32x2[src]

impl Sum<Simd<[f32; 2]>> for f32x2[src]

\ No newline at end of file diff --git a/packed_simd/type.f32x4.html b/packed_simd/type.f32x4.html new file mode 100644 index 000000000..ce72bd64d --- /dev/null +++ b/packed_simd/type.f32x4.html @@ -0,0 +1,217 @@ +packed_simd::f32x4 - Rust

[][src]Type Definition packed_simd::f32x4

type f32x4 = Simd<[f32; 4]>;

A 128-bit vector with 4 f32 lanes.

+

Implementations

impl f32x4[src]

pub const fn new(x0: f32, x1: f32, x2: f32, x3: f32) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: f32) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> f32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> f32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl f32x4[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl f32x4[src]

pub fn sum(self) -> f32[src]

Horizontal sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

+

pub fn product(self) -> f32[src]

Horizontal product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

+

impl f32x4[src]

pub fn max_element(self) -> f32[src]

Largest vector element value.

+

pub fn min_element(self) -> f32[src]

Smallest vector element value.

+

impl f32x4[src]

pub fn from_slice_aligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl f32x4[src]

pub fn write_to_slice_aligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
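
A sketch of the unaligned load/store pair (assuming packed_simd; the slices are illustrative). Extra trailing elements in the source slice are simply ignored:

use packed_simd::f32x4;

fn main() {
    let src = [1.0_f32, 2.0, 3.0, 4.0, 5.0];
    // Loads the first 4 elements; the 5th is ignored.
    let v = f32x4::from_slice_unaligned(&src);
    let mut dst = [0.0_f32; 4];
    v.write_to_slice_unaligned(&mut dst);
    assert_eq!(dst, [1.0, 2.0, 3.0, 4.0]);
}

+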

impl f32x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+
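
A sketch of a run-time lane permutation (assuming packed_simd, and assuming the Shuffle1Dyn index type for f32x4 is u32x4, the unsigned integer vector of matching lane count and width):

use packed_simd::{f32x4, u32x4};

fn main() {
    let v = f32x4::new(10.0, 20.0, 30.0, 40.0);
    // Indices are chosen at run time; here they reverse the lanes.
    let rev = v.shuffle1_dyn(u32x4::new(3, 2, 1, 0));
    assert_eq!(rev, f32x4::new(40.0, 30.0, 20.0, 10.0));
}

+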

impl f32x4[src]

pub const EPSILON: f32x4[src]

Machine epsilon value.

+

pub const MIN: f32x4[src]

Smallest finite value.

+

pub const MIN_POSITIVE: f32x4[src]

Smallest positive normal value.

+

pub const MAX: f32x4[src]

Largest finite value.

+

pub const NAN: f32x4[src]

Not a Number (NaN).

+

pub const INFINITY: f32x4[src]

Infinity (∞).

+

pub const NEG_INFINITY: f32x4[src]

Negative infinity (-∞).

+

pub const PI: f32x4[src]

Archimedes' constant (π)

+

pub const FRAC_PI_2: f32x4[src]

π/2

+

pub const FRAC_PI_3: f32x4[src]

π/3

+

pub const FRAC_PI_4: f32x4[src]

π/4

+

pub const FRAC_PI_6: f32x4[src]

π/6

+

pub const FRAC_PI_8: f32x4[src]

π/8

+

pub const FRAC_1_PI: f32x4[src]

1/π

+

pub const FRAC_2_PI: f32x4[src]

2/π

+

pub const FRAC_2_SQRT_PI: f32x4[src]

2/sqrt(π)

+

pub const SQRT_2: f32x4[src]

sqrt(2)

+

pub const FRAC_1_SQRT_2: f32x4[src]

1/sqrt(2)

+

pub const E: f32x4[src]

Euler's number (e)

+

pub const LOG2_E: f32x4[src]

log2(e)

+

pub const LOG10_E: f32x4[src]

log10(e)

+

pub const LN_2: f32x4[src]

ln(2)

+

pub const LN_10: f32x4[src]

ln(10)

+

impl f32x4[src]

pub fn is_nan(self) -> m32x4[src]

pub fn is_infinite(self) -> m32x4[src]

pub fn is_finite(self) -> m32x4[src]

impl f32x4[src]

pub fn abs(self) -> Self[src]

Absolute value.

+

impl f32x4[src]

pub fn cos(self) -> Self[src]

Cosine.

+

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

+

impl f32x4[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

+

impl f32x4[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

+

impl f32x4[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

+

impl f32x4[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

+

While fused multiply-add (fma) has infinite precision, mul_adde has at worst the same precision as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.

+

impl f32x4[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating-point power of x.

+

impl f32x4[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f32x4[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f32x4[src]

pub fn sin(self) -> Self[src]

Sine.

+

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

+

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

+

impl f32x4[src]

pub fn sqrt(self) -> Self[src]

impl f32x4[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f32x4[src]

pub fn tanh(self) -> Self[src]

Tanh.

+

impl f32x4[src]

pub fn eq(self, other: Self) -> m32x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x4[src]

Lane-wise greater-than-or-equals comparison.

+

Trait Implementations

impl Add<Simd<[f32; 4]>> for f32x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<f32> for f32x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[f32; 4]>> for f32x4[src]

impl AddAssign<f32> for f32x4[src]

impl Debug for f32x4[src]

impl Default for f32x4[src]

impl Div<Simd<[f32; 4]>> for f32x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<f32> for f32x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[f32; 4]>> for f32x4[src]

impl DivAssign<f32> for f32x4[src]

impl From<[f32; 4]> for f32x4[src]

impl From<Simd<[i16; 4]>> for f32x4[src]

impl From<Simd<[i8; 4]>> for f32x4[src]

impl From<Simd<[u16; 4]>> for f32x4[src]

impl From<Simd<[u8; 4]>> for f32x4[src]

impl FromBits<Simd<[f64; 2]>> for f32x4[src]

impl FromBits<Simd<[i128; 1]>> for f32x4[src]

impl FromBits<Simd<[i16; 8]>> for f32x4[src]

impl FromBits<Simd<[i32; 4]>> for f32x4[src]

impl FromBits<Simd<[i64; 2]>> for f32x4[src]

impl FromBits<Simd<[i8; 16]>> for f32x4[src]

impl FromBits<Simd<[m128; 1]>> for f32x4[src]

impl FromBits<Simd<[m16; 8]>> for f32x4[src]

impl FromBits<Simd<[m32; 4]>> for f32x4[src]

impl FromBits<Simd<[m64; 2]>> for f32x4[src]

impl FromBits<Simd<[m8; 16]>> for f32x4[src]

impl FromBits<Simd<[u128; 1]>> for f32x4[src]

impl FromBits<Simd<[u16; 8]>> for f32x4[src]

impl FromBits<Simd<[u32; 4]>> for f32x4[src]

impl FromBits<Simd<[u64; 2]>> for f32x4[src]

impl FromBits<Simd<[u8; 16]>> for f32x4[src]

impl FromBits<__m128> for f32x4[src]

impl FromBits<__m128d> for f32x4[src]

impl FromBits<__m128i> for f32x4[src]

impl FromCast<Simd<[f64; 4]>> for f32x4[src]

impl FromCast<Simd<[i128; 4]>> for f32x4[src]

impl FromCast<Simd<[i16; 4]>> for f32x4[src]

impl FromCast<Simd<[i32; 4]>> for f32x4[src]

impl FromCast<Simd<[i64; 4]>> for f32x4[src]

impl FromCast<Simd<[i8; 4]>> for f32x4[src]

impl FromCast<Simd<[isize; 4]>> for f32x4[src]

impl FromCast<Simd<[m128; 4]>> for f32x4[src]

impl FromCast<Simd<[m16; 4]>> for f32x4[src]

impl FromCast<Simd<[m32; 4]>> for f32x4[src]

impl FromCast<Simd<[m64; 4]>> for f32x4[src]

impl FromCast<Simd<[m8; 4]>> for f32x4[src]

impl FromCast<Simd<[msize; 4]>> for f32x4[src]

impl FromCast<Simd<[u128; 4]>> for f32x4[src]

impl FromCast<Simd<[u16; 4]>> for f32x4[src]

impl FromCast<Simd<[u32; 4]>> for f32x4[src]

impl FromCast<Simd<[u64; 4]>> for f32x4[src]

impl FromCast<Simd<[u8; 4]>> for f32x4[src]

impl FromCast<Simd<[usize; 4]>> for f32x4[src]

impl Mul<Simd<[f32; 4]>> for f32x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<f32> for f32x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[f32; 4]>> for f32x4[src]

impl MulAssign<f32> for f32x4[src]

impl Neg for f32x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl PartialEq<Simd<[f32; 4]>> for f32x4[src]

impl<'a> Product<&'a Simd<[f32; 4]>> for f32x4[src]

impl Product<Simd<[f32; 4]>> for f32x4[src]

impl Rem<Simd<[f32; 4]>> for f32x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<f32> for f32x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[f32; 4]>> for f32x4[src]

impl RemAssign<f32> for f32x4[src]

impl Simd for f32x4[src]

type Element = f32

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+

impl Sub<Simd<[f32; 4]>> for f32x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<f32> for f32x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[f32; 4]>> for f32x4[src]

impl SubAssign<f32> for f32x4[src]

impl<'a> Sum<&'a Simd<[f32; 4]>> for f32x4[src]

impl Sum<Simd<[f32; 4]>> for f32x4[src]

\ No newline at end of file diff --git a/packed_simd/type.f32x8.html b/packed_simd/type.f32x8.html new file mode 100644 index 000000000..cbd2e9fc8 --- /dev/null +++ b/packed_simd/type.f32x8.html @@ -0,0 +1,214 @@ +packed_simd::f32x8 - Rust

[][src]Type Definition packed_simd::f32x8

type f32x8 = Simd<[f32; 8]>;

A 256-bit vector with 8 f32 lanes.

+

Implementations

impl f32x8[src]

pub const fn new(
    x0: f32,
    x1: f32,
    x2: f32,
    x3: f32,
    x4: f32,
    x5: f32,
    x6: f32,
    x7: f32
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: f32) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> f32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> f32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: f32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl f32x8[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl f32x8[src]

pub fn sum(self) -> f32[src]

Horizontal sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

+

pub fn product(self) -> f32[src]

Horizontal product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

+

impl f32x8[src]

pub fn max_element(self) -> f32[src]

Largest vector element value.

+

pub fn min_element(self) -> f32[src]

Smallest vector element value.

+

impl f32x8[src]

pub fn from_slice_aligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl f32x8[src]

pub fn write_to_slice_aligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.
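
The stores mirror the loads; a minimal sketch of the unaligned variant (packed_simd assumed as a dependency):

    use packed_simd::f32x8;

    fn main() {
        let v = f32x8::splat(2.5);
        let mut out = [0.0f32; 8];

        // The unaligned store only requires out.len() >= f32x8::lanes().
        v.write_to_slice_unaligned(&mut out);
        assert_eq!(out, [2.5; 8]);
    }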


impl f32x8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl f32x8[src]

pub const EPSILON: f32x8[src]

Machine epsilon value.

+

pub const MIN: f32x8[src]

Smallest finite value.

+

pub const MIN_POSITIVE: f32x8[src]

Smallest positive normal value.

+

pub const MAX: f32x8[src]

Largest finite value.

+

pub const NAN: f32x8[src]

Not a Number (NaN).

+

pub const INFINITY: f32x8[src]

Infinity (∞).

+

pub const NEG_INFINITY: f32x8[src]

Negative infinity (-∞).

+

pub const PI: f32x8[src]

Archimedes' constant (π)

+

pub const FRAC_PI_2: f32x8[src]

π/2

+

pub const FRAC_PI_3: f32x8[src]

π/3

+

pub const FRAC_PI_4: f32x8[src]

π/4

+

pub const FRAC_PI_6: f32x8[src]

π/6

+

pub const FRAC_PI_8: f32x8[src]

π/8

+

pub const FRAC_1_PI: f32x8[src]

1/π

+

pub const FRAC_2_PI: f32x8[src]

2/π

+

pub const FRAC_2_SQRT_PI: f32x8[src]

2/sqrt(π)

+

pub const SQRT_2: f32x8[src]

sqrt(2)

+

pub const FRAC_1_SQRT_2: f32x8[src]

1/sqrt(2)

+

pub const E: f32x8[src]

Euler's number (e)

+

pub const LOG2_E: f32x8[src]

log2(e)

+

pub const LOG10_E: f32x8[src]

log10(e)

+

pub const LN_2: f32x8[src]

ln(2)

+

pub const LN_10: f32x8[src]

ln(10)

+

impl f32x8[src]

pub fn is_nan(self) -> m32x8[src]

pub fn is_infinite(self) -> m32x8[src]

pub fn is_finite(self) -> m32x8[src]

impl f32x8[src]

pub fn abs(self) -> Self[src]

Absolute value.

+

impl f32x8[src]

pub fn cos(self) -> Self[src]

Cosine.

+

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

+

impl f32x8[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

+

impl f32x8[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

+

impl f32x8[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

+

impl f32x8[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

+

While fused multiply-add (fma) computes the intermediate product with infinite precision and rounds only once, mul_adde is only guaranteed to be at worst as precise as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.
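
A sketch contrasting the two entry points; for these small integer-valued inputs both are exact, but in general the estimate may differ from the fused result in the last bits (packed_simd assumed as a dependency):

    use packed_simd::f32x8;

    fn main() {
        let x = f32x8::splat(2.0);
        let y = f32x8::splat(3.0);
        let z = f32x8::splat(1.0);

        // Exactly-rounded fused multiply-add: 2 * 3 + 1 = 7 in every lane.
        let fused = x.mul_add(y, z);
        // The estimate may fall back to a separate multiply and add.
        let estimate = x.mul_adde(y, z);

        assert_eq!(fused.extract(0), 7.0);
        assert_eq!(estimate.extract(0), 7.0); // exact here; not guaranteed in general
    }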


impl f32x8[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating-point power of x.

+

impl f32x8[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f32x8[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f32x8[src]

pub fn sin(self) -> Self[src]

Sine.

+

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

+

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

+

impl f32x8[src]

pub fn sqrt(self) -> Self[src]

impl f32x8[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f32x8[src]

pub fn tanh(self) -> Self[src]

Tanh.

+

impl f32x8[src]

pub fn eq(self, other: Self) -> m32x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x8[src]

Lane-wise greater-than-or-equals comparison.
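
The comparisons above return a boolean mask vector (m32x8) rather than a single bool. A minimal sketch, assuming the mask type's any/all/select helpers from the same crate:

    use packed_simd::f32x8;

    fn main() {
        let a = f32x8::new(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0);
        let b = f32x8::splat(4.0);

        // Comparisons are lane-wise and yield an m32x8 mask.
        let lt = a.lt(b);
        assert!(lt.any());  // some lanes are below 4.0 ...
        assert!(!lt.all()); // ... but not all of them.

        // `select` picks from `a` where the mask is true, else from `b`,
        // clamping every lane to at most 4.0 here.
        let clamped = lt.select(a, b);
        assert_eq!(clamped.max_element(), 4.0);
    }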


Trait Implementations

impl Add<Simd<[f32; 8]>> for f32x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<f32> for f32x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[f32; 8]>> for f32x8[src]

impl AddAssign<f32> for f32x8[src]

impl Debug for f32x8[src]

impl Default for f32x8[src]

impl Div<Simd<[f32; 8]>> for f32x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<f32> for f32x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[f32; 8]>> for f32x8[src]

impl DivAssign<f32> for f32x8[src]

impl From<[f32; 8]> for f32x8[src]

impl From<Simd<[i16; 8]>> for f32x8[src]

impl From<Simd<[i8; 8]>> for f32x8[src]

impl From<Simd<[u16; 8]>> for f32x8[src]

impl From<Simd<[u8; 8]>> for f32x8[src]

impl FromBits<Simd<[f64; 4]>> for f32x8[src]

impl FromBits<Simd<[i128; 2]>> for f32x8[src]

impl FromBits<Simd<[i16; 16]>> for f32x8[src]

impl FromBits<Simd<[i32; 8]>> for f32x8[src]

impl FromBits<Simd<[i64; 4]>> for f32x8[src]

impl FromBits<Simd<[i8; 32]>> for f32x8[src]

impl FromBits<Simd<[m128; 2]>> for f32x8[src]

impl FromBits<Simd<[m16; 16]>> for f32x8[src]

impl FromBits<Simd<[m32; 8]>> for f32x8[src]

impl FromBits<Simd<[m64; 4]>> for f32x8[src]

impl FromBits<Simd<[m8; 32]>> for f32x8[src]

impl FromBits<Simd<[u128; 2]>> for f32x8[src]

impl FromBits<Simd<[u16; 16]>> for f32x8[src]

impl FromBits<Simd<[u32; 8]>> for f32x8[src]

impl FromBits<Simd<[u64; 4]>> for f32x8[src]

impl FromBits<Simd<[u8; 32]>> for f32x8[src]

impl FromBits<__m256> for f32x8[src]

impl FromBits<__m256d> for f32x8[src]

impl FromBits<__m256i> for f32x8[src]

impl FromCast<Simd<[f64; 8]>> for f32x8[src]

impl FromCast<Simd<[i16; 8]>> for f32x8[src]

impl FromCast<Simd<[i32; 8]>> for f32x8[src]

impl FromCast<Simd<[i64; 8]>> for f32x8[src]

impl FromCast<Simd<[i8; 8]>> for f32x8[src]

impl FromCast<Simd<[isize; 8]>> for f32x8[src]

impl FromCast<Simd<[m16; 8]>> for f32x8[src]

impl FromCast<Simd<[m32; 8]>> for f32x8[src]

impl FromCast<Simd<[m64; 8]>> for f32x8[src]

impl FromCast<Simd<[m8; 8]>> for f32x8[src]

impl FromCast<Simd<[msize; 8]>> for f32x8[src]

impl FromCast<Simd<[u16; 8]>> for f32x8[src]

impl FromCast<Simd<[u32; 8]>> for f32x8[src]

impl FromCast<Simd<[u64; 8]>> for f32x8[src]

impl FromCast<Simd<[u8; 8]>> for f32x8[src]

impl FromCast<Simd<[usize; 8]>> for f32x8[src]

impl Mul<Simd<[f32; 8]>> for f32x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<f32> for f32x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[f32; 8]>> for f32x8[src]

impl MulAssign<f32> for f32x8[src]

impl Neg for f32x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl PartialEq<Simd<[f32; 8]>> for f32x8[src]

impl<'a> Product<&'a Simd<[f32; 8]>> for f32x8[src]

impl Product<Simd<[f32; 8]>> for f32x8[src]

impl Rem<Simd<[f32; 8]>> for f32x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<f32> for f32x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[f32; 8]>> for f32x8[src]

impl RemAssign<f32> for f32x8[src]

impl Simd for f32x8[src]

type Element = f32

Element type of the SIMD vector

+

type LanesType = [u32; 8]

The type: [u32; Self::N].

+

impl Sub<Simd<[f32; 8]>> for f32x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<f32> for f32x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[f32; 8]>> for f32x8[src]

impl SubAssign<f32> for f32x8[src]

impl<'a> Sum<&'a Simd<[f32; 8]>> for f32x8[src]

impl Sum<Simd<[f32; 8]>> for f32x8[src]

\ No newline at end of file
diff --git a/packed_simd/type.f64x2.html b/packed_simd/type.f64x2.html
new file mode 100644
index 000000000..41dd555f7
--- /dev/null
+++ b/packed_simd/type.f64x2.html
@@ -0,0 +1,220 @@
packed_simd::f64x2 - Rust

Type Definition packed_simd::f64x2

type f64x2 = Simd<[f64; 2]>;

A 128-bit vector with 2 f64 lanes.

+

Implementations

impl f64x2[src]

pub const fn new(x0: f64, x1: f64) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: f64) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> f64[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> f64[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

+

impl f64x2[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl f64x2[src]

pub fn sum(self) -> f64[src]

Horizontal sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

pub fn product(self) -> f64[src]

Horizontal product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

+

impl f64x2[src]

pub fn max_element(self) -> f64[src]

Largest vector element value.

+

pub fn min_element(self) -> f64[src]

Smallest vector element value.

+

impl f64x2[src]

pub fn from_slice_aligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl f64x2[src]

pub fn write_to_slice_aligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl f64x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl f64x2[src]

pub const EPSILON: f64x2[src]

Machine epsilon value.

+

pub const MIN: f64x2[src]

Smallest finite value.

+

pub const MIN_POSITIVE: f64x2[src]

Smallest positive normal value.

+

pub const MAX: f64x2[src]

Largest finite value.

+

pub const NAN: f64x2[src]

Not a Number (NaN).

+

pub const INFINITY: f64x2[src]

Infinity (∞).

+

pub const NEG_INFINITY: f64x2[src]

Negative infinity (-∞).

+

pub const PI: f64x2[src]

Archimedes' constant (π)

+

pub const FRAC_PI_2: f64x2[src]

π/2

+

pub const FRAC_PI_3: f64x2[src]

π/3

+

pub const FRAC_PI_4: f64x2[src]

π/4

+

pub const FRAC_PI_6: f64x2[src]

π/6

+

pub const FRAC_PI_8: f64x2[src]

π/8

+

pub const FRAC_1_PI: f64x2[src]

1/π

+

pub const FRAC_2_PI: f64x2[src]

2/π

+

pub const FRAC_2_SQRT_PI: f64x2[src]

2/sqrt(π)

+

pub const SQRT_2: f64x2[src]

sqrt(2)

+

pub const FRAC_1_SQRT_2: f64x2[src]

1/sqrt(2)

+

pub const E: f64x2[src]

Euler's number (e)

+

pub const LOG2_E: f64x2[src]

log2(e)

+

pub const LOG10_E: f64x2[src]

log10(e)

+

pub const LN_2: f64x2[src]

ln(2)

+

pub const LN_10: f64x2[src]

ln(10)

+

impl f64x2[src]

pub fn is_nan(self) -> m64x2[src]

pub fn is_infinite(self) -> m64x2[src]

pub fn is_finite(self) -> m64x2[src]

impl f64x2[src]

pub fn abs(self) -> Self[src]

Absolute value.

+

impl f64x2[src]

pub fn cos(self) -> Self[src]

Cosine.

+

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

+

impl f64x2[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

+

impl f64x2[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

+

impl f64x2[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

+

impl f64x2[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

+

While fused multiply-add (fma) computes the intermediate product with infinite precision and rounds only once, mul_adde is only guaranteed to be at worst as precise as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.

+

impl f64x2[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating-point power of x.

+

impl f64x2[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f64x2[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

+

FIXME: The precision of the estimate is currently unspecified.
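Since the precision of these estimates is unspecified, any numeric tolerance is an assumption; the sketch below compares the estimates against the exact values using an arbitrary, generous bound (packed_simd assumed as a dependency):

    use packed_simd::f64x2;

    fn main() {
        let v = f64x2::new(4.0, 16.0);

        // Compare the reciprocal estimate against the exact reciprocal.
        let exact_recip = f64x2::splat(1.0) / v;
        let err = (exact_recip - v.recpre()).abs().max_element();
        assert!(err < 1e-2, "reciprocal estimate error: {}", err); // assumed tolerance

        // Same idea for the reciprocal square-root estimate.
        let exact_rsqrt = f64x2::splat(1.0) / v.sqrt();
        let err = (exact_rsqrt - v.rsqrte()).abs().max_element();
        assert!(err < 1e-2, "rsqrt estimate error: {}", err); // assumed tolerance
    }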


impl f64x2[src]

pub fn sin(self) -> Self[src]

Sine.

+

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

+

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

+

impl f64x2[src]

pub fn sqrt(self) -> Self[src]

impl f64x2[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f64x2[src]

pub fn tanh(self) -> Self[src]

Tanh.

+

impl f64x2[src]

pub fn eq(self, other: Self) -> m64x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m64x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m64x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m64x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m64x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m64x2[src]

Lane-wise greater-than-or-equals comparison.

+

Trait Implementations

impl Add<Simd<[f64; 2]>> for f64x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<f64> for f64x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[f64; 2]>> for f64x2[src]

impl AddAssign<f64> for f64x2[src]

impl Debug for f64x2[src]

impl Default for f64x2[src]

impl Div<Simd<[f64; 2]>> for f64x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<f64> for f64x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[f64; 2]>> for f64x2[src]

impl DivAssign<f64> for f64x2[src]

impl From<[f64; 2]> for f64x2[src]

impl From<Simd<[f32; 2]>> for f64x2[src]

impl From<Simd<[i16; 2]>> for f64x2[src]

impl From<Simd<[i32; 2]>> for f64x2[src]

impl From<Simd<[i8; 2]>> for f64x2[src]

impl From<Simd<[u16; 2]>> for f64x2[src]

impl From<Simd<[u32; 2]>> for f64x2[src]

impl From<Simd<[u8; 2]>> for f64x2[src]

impl FromBits<Simd<[f32; 4]>> for f64x2[src]

impl FromBits<Simd<[i128; 1]>> for f64x2[src]

impl FromBits<Simd<[i16; 8]>> for f64x2[src]

impl FromBits<Simd<[i32; 4]>> for f64x2[src]

impl FromBits<Simd<[i64; 2]>> for f64x2[src]

impl FromBits<Simd<[i8; 16]>> for f64x2[src]

impl FromBits<Simd<[m128; 1]>> for f64x2[src]

impl FromBits<Simd<[m16; 8]>> for f64x2[src]

impl FromBits<Simd<[m32; 4]>> for f64x2[src]

impl FromBits<Simd<[m64; 2]>> for f64x2[src]

impl FromBits<Simd<[m8; 16]>> for f64x2[src]

impl FromBits<Simd<[u128; 1]>> for f64x2[src]

impl FromBits<Simd<[u16; 8]>> for f64x2[src]

impl FromBits<Simd<[u32; 4]>> for f64x2[src]

impl FromBits<Simd<[u64; 2]>> for f64x2[src]

impl FromBits<Simd<[u8; 16]>> for f64x2[src]

impl FromBits<__m128> for f64x2[src]

impl FromBits<__m128d> for f64x2[src]

impl FromBits<__m128i> for f64x2[src]

impl FromCast<Simd<[f32; 2]>> for f64x2[src]

impl FromCast<Simd<[i128; 2]>> for f64x2[src]

impl FromCast<Simd<[i16; 2]>> for f64x2[src]

impl FromCast<Simd<[i32; 2]>> for f64x2[src]

impl FromCast<Simd<[i64; 2]>> for f64x2[src]

impl FromCast<Simd<[i8; 2]>> for f64x2[src]

impl FromCast<Simd<[isize; 2]>> for f64x2[src]

impl FromCast<Simd<[m128; 2]>> for f64x2[src]

impl FromCast<Simd<[m16; 2]>> for f64x2[src]

impl FromCast<Simd<[m32; 2]>> for f64x2[src]

impl FromCast<Simd<[m64; 2]>> for f64x2[src]

impl FromCast<Simd<[m8; 2]>> for f64x2[src]

impl FromCast<Simd<[msize; 2]>> for f64x2[src]

impl FromCast<Simd<[u128; 2]>> for f64x2[src]

impl FromCast<Simd<[u16; 2]>> for f64x2[src]

impl FromCast<Simd<[u32; 2]>> for f64x2[src]

impl FromCast<Simd<[u64; 2]>> for f64x2[src]

impl FromCast<Simd<[u8; 2]>> for f64x2[src]

impl FromCast<Simd<[usize; 2]>> for f64x2[src]

impl Mul<Simd<[f64; 2]>> for f64x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<f64> for f64x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[f64; 2]>> for f64x2[src]

impl MulAssign<f64> for f64x2[src]

impl Neg for f64x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl PartialEq<Simd<[f64; 2]>> for f64x2[src]

impl<'a> Product<&'a Simd<[f64; 2]>> for f64x2[src]

impl Product<Simd<[f64; 2]>> for f64x2[src]

impl Rem<Simd<[f64; 2]>> for f64x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<f64> for f64x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[f64; 2]>> for f64x2[src]

impl RemAssign<f64> for f64x2[src]

impl Simd for f64x2[src]

type Element = f64

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+

impl Sub<Simd<[f64; 2]>> for f64x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<f64> for f64x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[f64; 2]>> for f64x2[src]

impl SubAssign<f64> for f64x2[src]

impl<'a> Sum<&'a Simd<[f64; 2]>> for f64x2[src]

impl Sum<Simd<[f64; 2]>> for f64x2[src]

\ No newline at end of file
diff --git a/packed_simd/type.f64x4.html b/packed_simd/type.f64x4.html
new file mode 100644
index 000000000..4ffa39674
--- /dev/null
+++ b/packed_simd/type.f64x4.html
@@ -0,0 +1,220 @@
packed_simd::f64x4 - Rust

Type Definition packed_simd::f64x4

type f64x4 = Simd<[f64; 4]>;

A 256-bit vector with 4 f64 lanes.

+

Implementations

impl f64x4[src]

pub const fn new(x0: f64, x1: f64, x2: f64, x3: f64) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: f64) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> f64[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> f64[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

+

impl f64x4[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl f64x4[src]

pub fn sum(self) -> f64[src]

Horizontal sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

pub fn product(self) -> f64[src]

Horizontal product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

+

impl f64x4[src]

pub fn max_element(self) -> f64[src]

Largest vector element value.

+

pub fn min_element(self) -> f64[src]

Smallest vector element value.

+

impl f64x4[src]

pub fn from_slice_aligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl f64x4[src]

pub fn write_to_slice_aligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl f64x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl f64x4[src]

pub const EPSILON: f64x4[src]

Machine epsilon value.

+

pub const MIN: f64x4[src]

Smallest finite value.

+

pub const MIN_POSITIVE: f64x4[src]

Smallest positive normal value.

+

pub const MAX: f64x4[src]

Largest finite value.

+

pub const NAN: f64x4[src]

Not a Number (NaN).

+

pub const INFINITY: f64x4[src]

Infinity (∞).

+

pub const NEG_INFINITY: f64x4[src]

Negative infinity (-∞).

+

pub const PI: f64x4[src]

Archimedes' constant (π)

+

pub const FRAC_PI_2: f64x4[src]

π/2

+

pub const FRAC_PI_3: f64x4[src]

π/3

+

pub const FRAC_PI_4: f64x4[src]

π/4

+

pub const FRAC_PI_6: f64x4[src]

π/6

+

pub const FRAC_PI_8: f64x4[src]

π/8

+

pub const FRAC_1_PI: f64x4[src]

1/π

+

pub const FRAC_2_PI: f64x4[src]

2/π

+

pub const FRAC_2_SQRT_PI: f64x4[src]

2/sqrt(π)

+

pub const SQRT_2: f64x4[src]

sqrt(2)

+

pub const FRAC_1_SQRT_2: f64x4[src]

1/sqrt(2)

+

pub const E: f64x4[src]

Euler's number (e)

+

pub const LOG2_E: f64x4[src]

log2(e)

+

pub const LOG10_E: f64x4[src]

log10(e)

+

pub const LN_2: f64x4[src]

ln(2)

+

pub const LN_10: f64x4[src]

ln(10)

+

impl f64x4[src]

pub fn is_nan(self) -> m64x4[src]

pub fn is_infinite(self) -> m64x4[src]

pub fn is_finite(self) -> m64x4[src]

impl f64x4[src]

pub fn abs(self) -> Self[src]

Absolute value.

+

impl f64x4[src]

pub fn cos(self) -> Self[src]

Cosine.

+

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

+

impl f64x4[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

+

impl f64x4[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

+

impl f64x4[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

+

impl f64x4[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

+

While fused multiply-add (fma) computes the intermediate product with infinite precision and rounds only once, mul_adde is only guaranteed to be at worst as precise as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.

+

impl f64x4[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating-point power of x.

+

impl f64x4[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f64x4[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f64x4[src]

pub fn sin(self) -> Self[src]

Sine.

+

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

+

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.
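
The *_pi variants take their argument in half-turns (the input is multiplied by PI internally), which avoids an explicit multiplication by an inexact PI. A small sketch, with an assumed tolerance on the results (packed_simd assumed as a dependency):

    use packed_simd::f64x4;

    fn main() {
        // 0.5 means PI/2, 1.0 means PI, and so on.
        let x = f64x4::new(0.0, 0.5, 1.0, 1.5);
        let (s, c) = x.sin_cos_pi();

        assert!((s.extract(1) - 1.0).abs() < 1e-12); // sin(PI/2) == 1
        assert!((c.extract(0) - 1.0).abs() < 1e-12); // cos(0) == 1

        // sin^2 + cos^2 == 1 holds lane-wise (up to the assumed tolerance).
        let one = s * s + c * c;
        assert!((one - f64x4::splat(1.0)).abs().max_element() < 1e-12);
    }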


impl f64x4[src]

pub fn sqrt(self) -> Self[src]

impl f64x4[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f64x4[src]

pub fn tanh(self) -> Self[src]

Tanh.

+

impl f64x4[src]

pub fn eq(self, other: Self) -> m64x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m64x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m64x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m64x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m64x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m64x4[src]

Lane-wise greater-than-or-equals comparison.

+

Trait Implementations

impl Add<Simd<[f64; 4]>> for f64x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<f64> for f64x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[f64; 4]>> for f64x4[src]

impl AddAssign<f64> for f64x4[src]

impl Debug for f64x4[src]

impl Default for f64x4[src]

impl Div<Simd<[f64; 4]>> for f64x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<f64> for f64x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[f64; 4]>> for f64x4[src]

impl DivAssign<f64> for f64x4[src]

impl From<[f64; 4]> for f64x4[src]

impl From<Simd<[f32; 4]>> for f64x4[src]

impl From<Simd<[i16; 4]>> for f64x4[src]

impl From<Simd<[i32; 4]>> for f64x4[src]

impl From<Simd<[i8; 4]>> for f64x4[src]

impl From<Simd<[u16; 4]>> for f64x4[src]

impl From<Simd<[u32; 4]>> for f64x4[src]

impl From<Simd<[u8; 4]>> for f64x4[src]

impl FromBits<Simd<[f32; 8]>> for f64x4[src]

impl FromBits<Simd<[i128; 2]>> for f64x4[src]

impl FromBits<Simd<[i16; 16]>> for f64x4[src]

impl FromBits<Simd<[i32; 8]>> for f64x4[src]

impl FromBits<Simd<[i64; 4]>> for f64x4[src]

impl FromBits<Simd<[i8; 32]>> for f64x4[src]

impl FromBits<Simd<[m128; 2]>> for f64x4[src]

impl FromBits<Simd<[m16; 16]>> for f64x4[src]

impl FromBits<Simd<[m32; 8]>> for f64x4[src]

impl FromBits<Simd<[m64; 4]>> for f64x4[src]

impl FromBits<Simd<[m8; 32]>> for f64x4[src]

impl FromBits<Simd<[u128; 2]>> for f64x4[src]

impl FromBits<Simd<[u16; 16]>> for f64x4[src]

impl FromBits<Simd<[u32; 8]>> for f64x4[src]

impl FromBits<Simd<[u64; 4]>> for f64x4[src]

impl FromBits<Simd<[u8; 32]>> for f64x4[src]

impl FromBits<__m256> for f64x4[src]

impl FromBits<__m256d> for f64x4[src]

impl FromBits<__m256i> for f64x4[src]

impl FromCast<Simd<[f32; 4]>> for f64x4[src]

impl FromCast<Simd<[i128; 4]>> for f64x4[src]

impl FromCast<Simd<[i16; 4]>> for f64x4[src]

impl FromCast<Simd<[i32; 4]>> for f64x4[src]

impl FromCast<Simd<[i64; 4]>> for f64x4[src]

impl FromCast<Simd<[i8; 4]>> for f64x4[src]

impl FromCast<Simd<[isize; 4]>> for f64x4[src]

impl FromCast<Simd<[m128; 4]>> for f64x4[src]

impl FromCast<Simd<[m16; 4]>> for f64x4[src]

impl FromCast<Simd<[m32; 4]>> for f64x4[src]

impl FromCast<Simd<[m64; 4]>> for f64x4[src]

impl FromCast<Simd<[m8; 4]>> for f64x4[src]

impl FromCast<Simd<[msize; 4]>> for f64x4[src]

impl FromCast<Simd<[u128; 4]>> for f64x4[src]

impl FromCast<Simd<[u16; 4]>> for f64x4[src]

impl FromCast<Simd<[u32; 4]>> for f64x4[src]

impl FromCast<Simd<[u64; 4]>> for f64x4[src]

impl FromCast<Simd<[u8; 4]>> for f64x4[src]

impl FromCast<Simd<[usize; 4]>> for f64x4[src]

impl Mul<Simd<[f64; 4]>> for f64x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<f64> for f64x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[f64; 4]>> for f64x4[src]

impl MulAssign<f64> for f64x4[src]

impl Neg for f64x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl PartialEq<Simd<[f64; 4]>> for f64x4[src]

impl<'a> Product<&'a Simd<[f64; 4]>> for f64x4[src]

impl Product<Simd<[f64; 4]>> for f64x4[src]

impl Rem<Simd<[f64; 4]>> for f64x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<f64> for f64x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[f64; 4]>> for f64x4[src]

impl RemAssign<f64> for f64x4[src]

impl Simd for f64x4[src]

type Element = f64

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+

impl Sub<Simd<[f64; 4]>> for f64x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<f64> for f64x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[f64; 4]>> for f64x4[src]

impl SubAssign<f64> for f64x4[src]

impl<'a> Sum<&'a Simd<[f64; 4]>> for f64x4[src]

impl Sum<Simd<[f64; 4]>> for f64x4[src]

\ No newline at end of file
diff --git a/packed_simd/type.f64x8.html b/packed_simd/type.f64x8.html
new file mode 100644
index 000000000..993f7e72a
--- /dev/null
+++ b/packed_simd/type.f64x8.html
@@ -0,0 +1,214 @@
packed_simd::f64x8 - Rust

Type Definition packed_simd::f64x8

type f64x8 = Simd<[f64; 8]>;

A 512-bit vector with 8 f64 lanes.

+

Implementations

impl f64x8[src]

pub const fn new(
    x0: f64,
    x1: f64,
    x2: f64,
    x3: f64,
    x4: f64,
    x5: f64,
    x6: f64,
    x7: f64
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: f64) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> f64[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> f64[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: f64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

+

impl f64x8[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl f64x8[src]

pub fn sum(self) -> f64[src]

Horizontal sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

pub fn product(self) -> f64[src]

Horizontal product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If one of the vector elements is NaN the reduction returns NaN. The resulting NaN is not required to be equal to any of the NaNs in the vector.

+

impl f64x8[src]

pub fn max_element(self) -> f64[src]

Largest vector element value.

+

pub fn min_element(self) -> f64[src]

Smallest vector element value.

+

impl f64x8[src]

pub fn from_slice_aligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[f64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl f64x8[src]

pub fn write_to_slice_aligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [f64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl f64x8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl f64x8[src]

pub const EPSILON: f64x8[src]

Machine epsilon value.

+

pub const MIN: f64x8[src]

Smallest finite value.

+

pub const MIN_POSITIVE: f64x8[src]

Smallest positive normal value.

+

pub const MAX: f64x8[src]

Largest finite value.

+

pub const NAN: f64x8[src]

Not a Number (NaN).

+

pub const INFINITY: f64x8[src]

Infinity (∞).

+

pub const NEG_INFINITY: f64x8[src]

Negative infinity (-∞).

+

pub const PI: f64x8[src]

Archimedes' constant (π)

+

pub const FRAC_PI_2: f64x8[src]

π/2

+

pub const FRAC_PI_3: f64x8[src]

π/3

+

pub const FRAC_PI_4: f64x8[src]

π/4

+

pub const FRAC_PI_6: f64x8[src]

π/6

+

pub const FRAC_PI_8: f64x8[src]

π/8

+

pub const FRAC_1_PI: f64x8[src]

1/π

+

pub const FRAC_2_PI: f64x8[src]

2/π

+

pub const FRAC_2_SQRT_PI: f64x8[src]

2/sqrt(π)

+

pub const SQRT_2: f64x8[src]

sqrt(2)

+

pub const FRAC_1_SQRT_2: f64x8[src]

1/sqrt(2)

+

pub const E: f64x8[src]

Euler's number (e)

+

pub const LOG2_E: f64x8[src]

log2(e)

+

pub const LOG10_E: f64x8[src]

log10(e)

+

pub const LN_2: f64x8[src]

ln(2)

+

pub const LN_10: f64x8[src]

ln(10)

+

impl f64x8[src]

pub fn is_nan(self) -> m64x8[src]

pub fn is_infinite(self) -> m64x8[src]

pub fn is_finite(self) -> m64x8[src]

impl f64x8[src]

pub fn abs(self) -> Self[src]

Absolute value.

+

impl f64x8[src]

pub fn cos(self) -> Self[src]

Cosine.

+

pub fn cos_pi(self) -> Self[src]

Cosine of self * PI.

+

impl f64x8[src]

pub fn exp(self) -> Self[src]

Returns the exponential function of self: e^(self).

+

impl f64x8[src]

pub fn ln(self) -> Self[src]

Returns the natural logarithm of self.

+

impl f64x8[src]

pub fn mul_add(self, y: Self, z: Self) -> Self[src]

Fused multiply add: self * y + z

+

impl f64x8[src]

pub fn mul_adde(self, y: Self, z: Self) -> Self[src]

Fused multiply add estimate: ~= self * y + z

+

While fused multiply-add (fma) computes the intermediate product with infinite precision and rounds only once, mul_adde is only guaranteed to be at worst as precise as a multiply followed by an add. This might be more efficient on architectures that do not have an fma instruction.

+

impl f64x8[src]

pub fn powf(self, x: Self) -> Self[src]

Raises self to the floating-point power of x.

+

impl f64x8[src]

pub fn recpre(self) -> Self[src]

Reciprocal estimate: ~= 1. / self.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f64x8[src]

pub fn rsqrte(self) -> Self[src]

Reciprocal square-root estimate: ~= 1. / self.sqrt().

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f64x8[src]

pub fn sin(self) -> Self[src]

Sine.

+

pub fn sin_pi(self) -> Self[src]

Sine of self * PI.

+

pub fn sin_cos_pi(self) -> (Self, Self)[src]

Sine and cosine of self * PI.

+

impl f64x8[src]

pub fn sqrt(self) -> Self[src]

impl f64x8[src]

pub fn sqrte(self) -> Self[src]

Square-root estimate.

+

FIXME: The precision of the estimate is currently unspecified.

+

impl f64x8[src]

pub fn tanh(self) -> Self[src]

Tanh.

+

impl f64x8[src]

pub fn eq(self, other: Self) -> m64x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m64x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m64x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m64x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m64x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m64x8[src]

Lane-wise greater-than-or-equals comparison.

+

Trait Implementations

impl Add<Simd<[f64; 8]>> for f64x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<f64> for f64x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[f64; 8]>> for f64x8[src]

impl AddAssign<f64> for f64x8[src]

impl Debug for f64x8[src]

impl Default for f64x8[src]

impl Div<Simd<[f64; 8]>> for f64x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<f64> for f64x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[f64; 8]>> for f64x8[src]

impl DivAssign<f64> for f64x8[src]

impl From<[f64; 8]> for f64x8[src]

impl From<Simd<[f32; 8]>> for f64x8[src]

impl From<Simd<[i16; 8]>> for f64x8[src]

impl From<Simd<[i32; 8]>> for f64x8[src]

impl From<Simd<[i8; 8]>> for f64x8[src]

impl From<Simd<[u16; 8]>> for f64x8[src]

impl From<Simd<[u32; 8]>> for f64x8[src]

impl From<Simd<[u8; 8]>> for f64x8[src]

impl FromBits<Simd<[f32; 16]>> for f64x8[src]

impl FromBits<Simd<[i128; 4]>> for f64x8[src]

impl FromBits<Simd<[i16; 32]>> for f64x8[src]

impl FromBits<Simd<[i32; 16]>> for f64x8[src]

impl FromBits<Simd<[i64; 8]>> for f64x8[src]

impl FromBits<Simd<[i8; 64]>> for f64x8[src]

impl FromBits<Simd<[m128; 4]>> for f64x8[src]

impl FromBits<Simd<[m16; 32]>> for f64x8[src]

impl FromBits<Simd<[m32; 16]>> for f64x8[src]

impl FromBits<Simd<[m64; 8]>> for f64x8[src]

impl FromBits<Simd<[m8; 64]>> for f64x8[src]

impl FromBits<Simd<[u128; 4]>> for f64x8[src]

impl FromBits<Simd<[u16; 32]>> for f64x8[src]

impl FromBits<Simd<[u32; 16]>> for f64x8[src]

impl FromBits<Simd<[u64; 8]>> for f64x8[src]

impl FromBits<Simd<[u8; 64]>> for f64x8[src]

impl FromCast<Simd<[f32; 8]>> for f64x8[src]

impl FromCast<Simd<[i16; 8]>> for f64x8[src]

impl FromCast<Simd<[i32; 8]>> for f64x8[src]

impl FromCast<Simd<[i64; 8]>> for f64x8[src]

impl FromCast<Simd<[i8; 8]>> for f64x8[src]

impl FromCast<Simd<[isize; 8]>> for f64x8[src]

impl FromCast<Simd<[m16; 8]>> for f64x8[src]

impl FromCast<Simd<[m32; 8]>> for f64x8[src]

impl FromCast<Simd<[m64; 8]>> for f64x8[src]

impl FromCast<Simd<[m8; 8]>> for f64x8[src]

impl FromCast<Simd<[msize; 8]>> for f64x8[src]

impl FromCast<Simd<[u16; 8]>> for f64x8[src]

impl FromCast<Simd<[u32; 8]>> for f64x8[src]

impl FromCast<Simd<[u64; 8]>> for f64x8[src]

impl FromCast<Simd<[u8; 8]>> for f64x8[src]

impl FromCast<Simd<[usize; 8]>> for f64x8[src]

impl Mul<Simd<[f64; 8]>> for f64x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<f64> for f64x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[f64; 8]>> for f64x8[src]

impl MulAssign<f64> for f64x8[src]

impl Neg for f64x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl PartialEq<Simd<[f64; 8]>> for f64x8[src]

impl<'a> Product<&'a Simd<[f64; 8]>> for f64x8[src]

impl Product<Simd<[f64; 8]>> for f64x8[src]

impl Rem<Simd<[f64; 8]>> for f64x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<f64> for f64x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[f64; 8]>> for f64x8[src]

impl RemAssign<f64> for f64x8[src]

impl Simd for f64x8[src]

type Element = f64

Element type of the SIMD vector

+

type LanesType = [u32; 8]

The type: [u32; Self::N].

+

impl Sub<Simd<[f64; 8]>> for f64x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<f64> for f64x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[f64; 8]>> for f64x8[src]

impl SubAssign<f64> for f64x8[src]

impl<'a> Sum<&'a Simd<[f64; 8]>> for f64x8[src]

impl Sum<Simd<[f64; 8]>> for f64x8[src]

\ No newline at end of file
diff --git a/packed_simd/type.i128x1.html b/packed_simd/type.i128x1.html
new file mode 100644
index 000000000..787d1581e
--- /dev/null
+++ b/packed_simd/type.i128x1.html
@@ -0,0 +1,233 @@
packed_simd::i128x1 - Rust

Type Definition packed_simd::i128x1

type i128x1 = Simd<[i128; 1]>;

A 128-bit vector with 1 i128 lane.

+

Implementations

impl i128x1[src]

pub const fn new(x0: i128) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i128) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> i128[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> i128[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

+

impl i128x1[src]

pub fn rotate_left(self, n: i128x1) -> i128x1[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i128x1) -> i128x1[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
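
A short sketch of the bit rotations; unlike the shift operators, no bits are discarded, so rotating left and then right by the same amount round-trips (packed_simd assumed as a dependency):

    use packed_simd::i128x1;

    fn main() {
        let x = i128x1::splat(0b1011);
        let n = i128x1::splat(1);

        // Bits shifted out of one end wrap around to the other end
        // of the 128-bit lane.
        assert_eq!(x.rotate_left(n), i128x1::splat(0b10110));
        assert_eq!(x.rotate_left(n).rotate_right(n), x);
    }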


impl i128x1[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl i128x1[src]

pub fn wrapping_sum(self) -> i128[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^128, the bit width of an i128 lane.

pub fn wrapping_product(self) -> i128[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result reduced modulo 2^128, the bit width of an i128 lane.
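
With a single lane the reduction is trivially the lane value, so the sketch below also uses a two-lane type to show the wrapping behavior (packed_simd assumed as a dependency):

    use packed_simd::{i128x1, i64x2};

    fn main() {
        // Single lane: the reduction is just that lane.
        assert_eq!(i128x1::new(7).wrapping_sum(), 7);
        assert_eq!(i128x1::new(7).wrapping_product(), 7);

        // Two i64 lanes: i64::MAX + 1 wraps to i64::MIN instead of panicking.
        let v = i64x2::new(i64::MAX, 1);
        assert_eq!(v.wrapping_sum(), i64::MIN);
    }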


impl i128x1[src]

pub fn max_element(self) -> i128[src]

Largest vector element value.

+

pub fn min_element(self) -> i128[src]

Smallest vector element value.

+

impl i128x1[src]

pub fn and(self) -> i128[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i128[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i128[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

+

impl i128x1[src]

pub fn from_slice_aligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i128x1[src]

pub fn write_to_slice_aligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i128x1[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.
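
Because each conversion is either a no-op or a byte swap depending on the target, the pairs always round-trip regardless of endianness; a minimal sketch (packed_simd assumed as a dependency):

    use packed_simd::i128x1;

    fn main() {
        let x = i128x1::splat(0x0102_0304);

        // `from_le(to_le(x))` and `from_be(to_be(x))` are identities
        // on every target, whichever side does the actual swap.
        assert_eq!(i128x1::from_le(x.to_le()), x);
        assert_eq!(i128x1::from_be(x.to_be()), x);

        // Swapping bytes twice is also the identity.
        assert_eq!(x.swap_bytes().swap_bytes(), x);
    }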


impl i128x1[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.
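
Note that these return a vector of per-lane counts rather than a scalar; a small sketch (packed_simd assumed as a dependency):

    use packed_simd::i128x1;

    fn main() {
        let x = i128x1::splat(0b1010); // bits 1 and 3 set

        assert_eq!(x.count_ones(), i128x1::splat(2));
        assert_eq!(x.trailing_zeros(), i128x1::splat(1));
        // 128-bit lane with the highest set bit at position 3
        // leaves 124 leading zeros.
        assert_eq!(x.leading_zeros(), i128x1::splat(124));
    }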


impl i128x1[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i128x1[src]

pub fn eq(self, other: Self) -> m128x1[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m128x1[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m128x1[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m128x1[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m128x1[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m128x1[src]

Lane-wise greater-than-or-equals comparison.

+

impl i128x1[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i128x1>[src]

Returns a wrapper that implements PartialOrd.

+

impl i128x1[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i128x1>[src]

Returns a wrapper that implements Ord.

+

impl i128x1[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.
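
A sketch of the bitmask layout: lane i contributes bit i of the result, and a lane's MSB is its sign bit for signed integers. The eight-lane example uses i8x8 from the same crate (packed_simd assumed as a dependency):

    use packed_simd::{i128x1, i8x8};

    fn main() {
        // One lane: only bit 0 can be set; a negative lane has its MSB set.
        assert_eq!(i128x1::splat(-1).bitmask(), 0b1);
        assert_eq!(i128x1::splat(1).bitmask(), 0b0);

        // Eight lanes: each lane's MSB fills one bit of the u8 mask.
        let v = i8x8::new(-1, 1, -1, 1, -1, 1, -1, 1);
        assert_eq!(v.bitmask(), 0b0101_0101);
    }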


Trait Implementations

impl Add<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i128> for i128x1[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i128; 1]>> for i128x1[src]

impl AddAssign<i128> for i128x1[src]

impl Binary for i128x1[src]

impl BitAnd<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i128> for i128x1[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i128; 1]>> for i128x1[src]

impl BitAndAssign<i128> for i128x1[src]

impl BitOr<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i128> for i128x1[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i128; 1]>> for i128x1[src]

impl BitOrAssign<i128> for i128x1[src]

impl BitXor<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i128> for i128x1[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i128; 1]>> for i128x1[src]

impl BitXorAssign<i128> for i128x1[src]

impl Debug for i128x1[src]

impl Default for i128x1[src]

impl Div<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i128> for i128x1[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i128; 1]>> for i128x1[src]

impl DivAssign<i128> for i128x1[src]

impl Eq for i128x1[src]

impl From<[i128; 1]> for i128x1[src]

impl FromBits<Simd<[f32; 4]>> for i128x1[src]

impl FromBits<Simd<[f64; 2]>> for i128x1[src]

impl FromBits<Simd<[i16; 8]>> for i128x1[src]

impl FromBits<Simd<[i32; 4]>> for i128x1[src]

impl FromBits<Simd<[i64; 2]>> for i128x1[src]

impl FromBits<Simd<[i8; 16]>> for i128x1[src]

impl FromBits<Simd<[m128; 1]>> for i128x1[src]

impl FromBits<Simd<[m16; 8]>> for i128x1[src]

impl FromBits<Simd<[m32; 4]>> for i128x1[src]

impl FromBits<Simd<[m64; 2]>> for i128x1[src]

impl FromBits<Simd<[m8; 16]>> for i128x1[src]

impl FromBits<Simd<[u128; 1]>> for i128x1[src]

impl FromBits<Simd<[u16; 8]>> for i128x1[src]

impl FromBits<Simd<[u32; 4]>> for i128x1[src]

impl FromBits<Simd<[u64; 2]>> for i128x1[src]

impl FromBits<Simd<[u8; 16]>> for i128x1[src]

impl FromBits<__m128> for i128x1[src]

impl FromBits<__m128d> for i128x1[src]

impl FromBits<__m128i> for i128x1[src]

impl FromCast<Simd<[m128; 1]>> for i128x1[src]

impl FromCast<Simd<[u128; 1]>> for i128x1[src]

impl Hash for i128x1[src]

impl LowerHex for i128x1[src]

impl Mul<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i128> for i128x1[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i128; 1]>> for i128x1[src]

impl MulAssign<i128> for i128x1[src]

impl Neg for i128x1[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i128x1[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i128x1[src]

impl PartialEq<Simd<[i128; 1]>> for i128x1[src]

impl<'a> Product<&'a Simd<[i128; 1]>> for i128x1[src]

impl Product<Simd<[i128; 1]>> for i128x1[src]

impl Rem<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i128> for i128x1[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i128; 1]>> for i128x1[src]

impl RemAssign<i128> for i128x1[src]

impl Shl<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i128x1[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i128; 1]>> for i128x1[src]

impl ShlAssign<u32> for i128x1[src]

impl Shr<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i128x1[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i128; 1]>> for i128x1[src]

impl ShrAssign<u32> for i128x1[src]

impl Simd for i128x1[src]

type Element = i128

Element type of the SIMD vector

+

type LanesType = [u32; 1]

The type: [u32; Self::N].

+

impl Sub<Simd<[i128; 1]>> for i128x1[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i128> for i128x1[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i128; 1]>> for i128x1[src]

impl SubAssign<i128> for i128x1[src]

impl<'a> Sum<&'a Simd<[i128; 1]>> for i128x1[src]

impl Sum<Simd<[i128; 1]>> for i128x1[src]

impl UpperHex for i128x1[src]

packed_simd::i128x2 - Rust

[][src]Type Definition packed_simd::i128x2

type i128x2 = Simd<[i128; 2]>;

A 256-bit vector with 2 i128 lanes.

+

Implementations

impl i128x2[src]

pub const fn new(x0: i128, x1: i128) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i128) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i128[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i128[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.
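As a usage sketch (assuming the packed_simd crate on a nightly toolchain):

use packed_simd::i128x2;

let v = i128x2::new(10, 20);
assert_eq!(v.extract(0), 10);

// replace returns a new vector; v itself is unchanged.
let w = v.replace(1, 42);
assert_eq!(w.extract(1), 42);
assert_eq!(v.extract(1), 20);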

+

impl i128x2[src]

pub fn rotate_left(self, n: i128x2) -> i128x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i128x2) -> i128x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
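A short sketch of the difference from a plain shift, assuming the packed_simd crate (the bit patterns are illustrative):

use packed_simd::i128x2;

// Each lane rotates by the amount in the corresponding lane of n.
let x = i128x2::splat(0b0001);
assert_eq!(x.rotate_left(i128x2::new(1, 3)), i128x2::new(0b0010, 0b1000));

// Unlike >>, the truncated low bit wraps around to the sign bit.
assert_eq!(i128x2::splat(1).rotate_right(i128x2::splat(1)), i128x2::splat(i128::MIN));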

+

impl i128x2[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.
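For example (a sketch assuming the packed_simd crate):

use packed_simd::i128x2;

let a = i128x2::new(1, 8);
let b = i128x2::new(4, 2);
assert_eq!(a.min(b), i128x2::new(1, 2));
assert_eq!(a.max(b), i128x2::new(4, 8));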

+

impl i128x2[src]

pub fn wrapping_sum(self) -> i128[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

pub fn wrapping_product(self) -> i128[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.
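A sketch of the wrapping behavior (assuming the packed_simd crate; the values are illustrative):

use packed_simd::i128x2;

// i128::MAX + 1 wraps around to i128::MIN.
assert_eq!(i128x2::new(i128::MAX, 1).wrapping_sum(), i128::MIN);
assert_eq!(i128x2::new(3, 4).wrapping_product(), 12);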

+

impl i128x2[src]

pub fn max_element(self) -> i128[src]

Largest vector element value.

+

pub fn min_element(self) -> i128[src]

Smallest vector element value.

+

impl i128x2[src]

pub fn and(self) -> i128[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i128[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i128[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.
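For example (sketch assuming the packed_simd crate):

use packed_simd::i128x2;

let v = i128x2::new(0b1100, 0b1010);
assert_eq!(v.and(), 0b1000);
assert_eq!(v.or(), 0b1110);
assert_eq!(v.xor(), 0b0110);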

+

impl i128x2[src]

pub fn from_slice_aligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i128x2[src]

pub fn write_to_slice_aligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.
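A round-trip sketch using the checked, unaligned variants (assuming the packed_simd crate):

use packed_simd::i128x2;

let input = [1i128, 2];
let v = i128x2::from_slice_unaligned(&input);

let mut output = [0i128; 2];
v.write_to_slice_unaligned(&mut output);
assert_eq!(input, output);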

+

impl i128x2[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.
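One property worth noting: round-tripping through a fixed endianness is the identity on every target, which makes for a target-independent sketch (assuming the packed_simd crate):

use packed_simd::i128x2;

let v = i128x2::new(1, 2);
assert_eq!(i128x2::from_le(v.to_le()), v);
assert_eq!(i128x2::from_be(v.to_be()), v);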

+

impl i128x2[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.
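For example (sketch assuming the packed_simd crate):

use packed_simd::i128x2;

let v = i128x2::new(0b1011, 0);
assert_eq!(v.count_ones(), i128x2::new(3, 0));
assert_eq!(v.count_zeros(), i128x2::new(125, 128));
assert_eq!(v.trailing_zeros(), i128x2::new(0, 128));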

+

impl i128x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i128x2[src]

pub fn eq(self, other: Self) -> m128x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m128x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m128x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m128x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m128x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m128x2[src]

Lane-wise greater-than-or-equals comparison.

+

impl i128x2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i128x2>[src]

Returns a wrapper that implements PartialOrd.

+

impl i128x2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i128x2>[src]

Returns a wrapper that implements Ord.
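Since the lane-wise comparison operators return masks, the vector type itself does not implement PartialOrd/Ord; these wrappers compare lexicographically, as slices do. A sketch (assuming the packed_simd crate):

use packed_simd::i128x2;

let a = i128x2::new(1, 9);
let b = i128x2::new(2, 0);
// The first lane decides, like comparing &[1, 9] with &[2, 0].
assert!(a.partial_lex_ord() < b.partial_lex_ord());
assert!(a.lex_ord() < b.lex_ord());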

+

impl i128x2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i128> for i128x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i128; 2]>> for i128x2[src]

impl AddAssign<i128> for i128x2[src]

impl Binary for i128x2[src]

impl BitAnd<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i128> for i128x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i128; 2]>> for i128x2[src]

impl BitAndAssign<i128> for i128x2[src]

impl BitOr<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i128> for i128x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i128; 2]>> for i128x2[src]

impl BitOrAssign<i128> for i128x2[src]

impl BitXor<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i128> for i128x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i128; 2]>> for i128x2[src]

impl BitXorAssign<i128> for i128x2[src]

impl Debug for i128x2[src]

impl Default for i128x2[src]

impl Div<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i128> for i128x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i128; 2]>> for i128x2[src]

impl DivAssign<i128> for i128x2[src]

impl Eq for i128x2[src]

impl From<[i128; 2]> for i128x2[src]

impl From<Simd<[i16; 2]>> for i128x2[src]

impl From<Simd<[i32; 2]>> for i128x2[src]

impl From<Simd<[i64; 2]>> for i128x2[src]

impl From<Simd<[i8; 2]>> for i128x2[src]

impl From<Simd<[u16; 2]>> for i128x2[src]

impl From<Simd<[u32; 2]>> for i128x2[src]

impl From<Simd<[u64; 2]>> for i128x2[src]

impl From<Simd<[u8; 2]>> for i128x2[src]

impl FromBits<Simd<[f32; 8]>> for i128x2[src]

impl FromBits<Simd<[f64; 4]>> for i128x2[src]

impl FromBits<Simd<[i16; 16]>> for i128x2[src]

impl FromBits<Simd<[i32; 8]>> for i128x2[src]

impl FromBits<Simd<[i64; 4]>> for i128x2[src]

impl FromBits<Simd<[i8; 32]>> for i128x2[src]

impl FromBits<Simd<[m128; 2]>> for i128x2[src]

impl FromBits<Simd<[m16; 16]>> for i128x2[src]

impl FromBits<Simd<[m32; 8]>> for i128x2[src]

impl FromBits<Simd<[m64; 4]>> for i128x2[src]

impl FromBits<Simd<[m8; 32]>> for i128x2[src]

impl FromBits<Simd<[u128; 2]>> for i128x2[src]

impl FromBits<Simd<[u16; 16]>> for i128x2[src]

impl FromBits<Simd<[u32; 8]>> for i128x2[src]

impl FromBits<Simd<[u64; 4]>> for i128x2[src]

impl FromBits<Simd<[u8; 32]>> for i128x2[src]

impl FromBits<__m256> for i128x2[src]

impl FromBits<__m256d> for i128x2[src]

impl FromBits<__m256i> for i128x2[src]

impl FromCast<Simd<[f32; 2]>> for i128x2[src]

impl FromCast<Simd<[f64; 2]>> for i128x2[src]

impl FromCast<Simd<[i16; 2]>> for i128x2[src]

impl FromCast<Simd<[i32; 2]>> for i128x2[src]

impl FromCast<Simd<[i64; 2]>> for i128x2[src]

impl FromCast<Simd<[i8; 2]>> for i128x2[src]

impl FromCast<Simd<[isize; 2]>> for i128x2[src]

impl FromCast<Simd<[m128; 2]>> for i128x2[src]

impl FromCast<Simd<[m16; 2]>> for i128x2[src]

impl FromCast<Simd<[m32; 2]>> for i128x2[src]

impl FromCast<Simd<[m64; 2]>> for i128x2[src]

impl FromCast<Simd<[m8; 2]>> for i128x2[src]

impl FromCast<Simd<[msize; 2]>> for i128x2[src]

impl FromCast<Simd<[u128; 2]>> for i128x2[src]

impl FromCast<Simd<[u16; 2]>> for i128x2[src]

impl FromCast<Simd<[u32; 2]>> for i128x2[src]

impl FromCast<Simd<[u64; 2]>> for i128x2[src]

impl FromCast<Simd<[u8; 2]>> for i128x2[src]

impl FromCast<Simd<[usize; 2]>> for i128x2[src]
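The FromCast implementations above are the lane-count-preserving casts: each lane is converted much like an as cast between the element types. A sketch (assuming packed_simd exports the FromCast trait):

use packed_simd::{i128x2, i32x2, FromCast};

let small = i32x2::new(-1, 7);
// Sign-extends each i32 lane to an i128 lane.
assert_eq!(i128x2::from_cast(small), i128x2::new(-1, 7));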

impl Hash for i128x2[src]

impl LowerHex for i128x2[src]

impl Mul<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i128> for i128x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i128; 2]>> for i128x2[src]

impl MulAssign<i128> for i128x2[src]

impl Neg for i128x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i128x2[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i128x2[src]

impl PartialEq<Simd<[i128; 2]>> for i128x2[src]

impl<'a> Product<&'a Simd<[i128; 2]>> for i128x2[src]

impl Product<Simd<[i128; 2]>> for i128x2[src]

impl Rem<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i128> for i128x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i128; 2]>> for i128x2[src]

impl RemAssign<i128> for i128x2[src]

impl Shl<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i128x2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i128; 2]>> for i128x2[src]

impl ShlAssign<u32> for i128x2[src]

impl Shr<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i128x2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i128; 2]>> for i128x2[src]

impl ShrAssign<u32> for i128x2[src]

impl Simd for i128x2[src]

type Element = i128

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+

impl Sub<Simd<[i128; 2]>> for i128x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i128> for i128x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i128; 2]>> for i128x2[src]

impl SubAssign<i128> for i128x2[src]

impl<'a> Sum<&'a Simd<[i128; 2]>> for i128x2[src]

impl Sum<Simd<[i128; 2]>> for i128x2[src]

impl UpperHex for i128x2[src]

packed_simd::i128x4 - Rust

[][src]Type Definition packed_simd::i128x4

type i128x4 = Simd<[i128; 4]>;

A 512-bit vector with 4 i128 lanes.

+

Implementations

impl i128x4[src]

pub const fn new(x0: i128, x1: i128, x2: i128, x3: i128) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i128) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i128[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i128[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl i128x4[src]

pub fn rotate_left(self, n: i128x4) -> i128x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: i128x4) -> i128x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl i128x4[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl i128x4[src]

pub fn wrapping_sum(self) -> i128[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

+

pub fn wrapping_product(self) -> i128[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

+

impl i128x4[src]

pub fn max_element(self) -> i128[src]

Largest vector element value.

+

pub fn min_element(self) -> i128[src]

Smallest vector element value.

+

impl i128x4[src]

pub fn and(self) -> i128[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i128[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i128[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

+

impl i128x4[src]

pub fn from_slice_aligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i128]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i128x4[src]

pub fn write_to_slice_aligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i128])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i128x4[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl i128x4[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl i128x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i128x4[src]

pub fn eq(self, other: Self) -> m128x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m128x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m128x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m128x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m128x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m128x4[src]

Lane-wise greater-than-or-equals comparison.
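These comparisons return a mask vector with one boolean lane per input lane, for example (sketch assuming the packed_simd crate):

use packed_simd::{i128x4, m128x4};

let a = i128x4::new(1, 2, 3, 4);
let b = i128x4::new(4, 2, 0, 4);
assert_eq!(a.lt(b), m128x4::new(true, false, false, false));
assert_eq!(a.le(b), m128x4::new(true, true, false, true));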

+

impl i128x4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i128x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl i128x4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i128x4>[src]

Returns a wrapper that implements Ord.

+

impl i128x4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i128> for i128x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i128; 4]>> for i128x4[src]

impl AddAssign<i128> for i128x4[src]

impl Binary for i128x4[src]

impl BitAnd<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i128> for i128x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i128; 4]>> for i128x4[src]

impl BitAndAssign<i128> for i128x4[src]

impl BitOr<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i128> for i128x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i128; 4]>> for i128x4[src]

impl BitOrAssign<i128> for i128x4[src]

impl BitXor<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i128> for i128x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i128; 4]>> for i128x4[src]

impl BitXorAssign<i128> for i128x4[src]

impl Debug for i128x4[src]

impl Default for i128x4[src]

impl Div<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i128> for i128x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i128; 4]>> for i128x4[src]

impl DivAssign<i128> for i128x4[src]

impl Eq for i128x4[src]

impl From<[i128; 4]> for i128x4[src]

impl From<Simd<[i16; 4]>> for i128x4[src]

impl From<Simd<[i32; 4]>> for i128x4[src]

impl From<Simd<[i64; 4]>> for i128x4[src]

impl From<Simd<[i8; 4]>> for i128x4[src]

impl From<Simd<[u16; 4]>> for i128x4[src]

impl From<Simd<[u32; 4]>> for i128x4[src]

impl From<Simd<[u64; 4]>> for i128x4[src]

impl From<Simd<[u8; 4]>> for i128x4[src]

impl FromBits<Simd<[f32; 16]>> for i128x4[src]

impl FromBits<Simd<[f64; 8]>> for i128x4[src]

impl FromBits<Simd<[i16; 32]>> for i128x4[src]

impl FromBits<Simd<[i32; 16]>> for i128x4[src]

impl FromBits<Simd<[i64; 8]>> for i128x4[src]

impl FromBits<Simd<[i8; 64]>> for i128x4[src]

impl FromBits<Simd<[m128; 4]>> for i128x4[src]

impl FromBits<Simd<[m16; 32]>> for i128x4[src]

impl FromBits<Simd<[m32; 16]>> for i128x4[src]

impl FromBits<Simd<[m64; 8]>> for i128x4[src]

impl FromBits<Simd<[m8; 64]>> for i128x4[src]

impl FromBits<Simd<[u128; 4]>> for i128x4[src]

impl FromBits<Simd<[u16; 32]>> for i128x4[src]

impl FromBits<Simd<[u32; 16]>> for i128x4[src]

impl FromBits<Simd<[u64; 8]>> for i128x4[src]

impl FromBits<Simd<[u8; 64]>> for i128x4[src]

impl FromCast<Simd<[f32; 4]>> for i128x4[src]

impl FromCast<Simd<[f64; 4]>> for i128x4[src]

impl FromCast<Simd<[i16; 4]>> for i128x4[src]

impl FromCast<Simd<[i32; 4]>> for i128x4[src]

impl FromCast<Simd<[i64; 4]>> for i128x4[src]

impl FromCast<Simd<[i8; 4]>> for i128x4[src]

impl FromCast<Simd<[isize; 4]>> for i128x4[src]

impl FromCast<Simd<[m128; 4]>> for i128x4[src]

impl FromCast<Simd<[m16; 4]>> for i128x4[src]

impl FromCast<Simd<[m32; 4]>> for i128x4[src]

impl FromCast<Simd<[m64; 4]>> for i128x4[src]

impl FromCast<Simd<[m8; 4]>> for i128x4[src]

impl FromCast<Simd<[msize; 4]>> for i128x4[src]

impl FromCast<Simd<[u128; 4]>> for i128x4[src]

impl FromCast<Simd<[u16; 4]>> for i128x4[src]

impl FromCast<Simd<[u32; 4]>> for i128x4[src]

impl FromCast<Simd<[u64; 4]>> for i128x4[src]

impl FromCast<Simd<[u8; 4]>> for i128x4[src]

impl FromCast<Simd<[usize; 4]>> for i128x4[src]

impl Hash for i128x4[src]

impl LowerHex for i128x4[src]

impl Mul<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i128> for i128x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i128; 4]>> for i128x4[src]

impl MulAssign<i128> for i128x4[src]

impl Neg for i128x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i128x4[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i128x4[src]

impl PartialEq<Simd<[i128; 4]>> for i128x4[src]

impl<'a> Product<&'a Simd<[i128; 4]>> for i128x4[src]

impl Product<Simd<[i128; 4]>> for i128x4[src]

impl Rem<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i128> for i128x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i128; 4]>> for i128x4[src]

impl RemAssign<i128> for i128x4[src]

impl Shl<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i128x4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i128; 4]>> for i128x4[src]

impl ShlAssign<u32> for i128x4[src]

impl Shr<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i128x4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i128; 4]>> for i128x4[src]

impl ShrAssign<u32> for i128x4[src]

impl Simd for i128x4[src]

type Element = i128

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+

impl Sub<Simd<[i128; 4]>> for i128x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i128> for i128x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i128; 4]>> for i128x4[src]

impl SubAssign<i128> for i128x4[src]

impl<'a> Sum<&'a Simd<[i128; 4]>> for i128x4[src]

impl Sum<Simd<[i128; 4]>> for i128x4[src]

impl UpperHex for i128x4[src]

packed_simd::i16x16 - Rust

[][src]Type Definition packed_simd::i16x16

type i16x16 = Simd<[i16; 16]>;

A 256-bit vector with 16 i16 lanes.

+

Implementations

impl i16x16[src]

pub const fn new(
    x0: i16,
    x1: i16,
    x2: i16,
    x3: i16,
    x4: i16,
    x5: i16,
    x6: i16,
    x7: i16,
    x8: i16,
    x9: i16,
    x10: i16,
    x11: i16,
    x12: i16,
    x13: i16,
    x14: i16,
    x15: i16
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i16) -> Self[src]

Constructs a new instance with each element initialized to value.
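For example (sketch assuming the packed_simd crate):

use packed_simd::i16x16;

assert_eq!(i16x16::lanes(), 16);
let v = i16x16::splat(7);
assert_eq!(v.extract(0), 7);
assert_eq!(v.extract(15), 7);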

+

pub fn extract(self, index: usize) -> i16[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i16[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl i16x16[src]

pub fn rotate_left(self, n: i16x16) -> i16x16[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: i16x16) -> i16x16[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl i16x16[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl i16x16[src]

pub fn wrapping_sum(self) -> i16[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

+

pub fn wrapping_product(self) -> i16[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

+

impl i16x16[src]

pub fn max_element(self) -> i16[src]

Largest vector element value.

+

pub fn min_element(self) -> i16[src]

Smallest vector element value.

+

impl i16x16[src]

pub fn and(self) -> i16[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i16[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i16[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

+

impl i16x16[src]

pub fn from_slice_aligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i16x16[src]

pub fn write_to_slice_aligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i16x16[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl i16x16[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl i16x16[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i16x16[src]

pub fn eq(self, other: Self) -> m16x16[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x16[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x16[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x16[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x16[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x16[src]

Lane-wise greater-than-or-equals comparison.

+

impl i16x16[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i16x16>[src]

Returns a wrapper that implements PartialOrd.

+

impl i16x16[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i16x16>[src]

Returns a wrapper that implements Ord.

+

impl i16x16[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 16 lanes, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i16> for i16x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i16; 16]>> for i16x16[src]

impl AddAssign<i16> for i16x16[src]

impl Binary for i16x16[src]

impl BitAnd<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i16> for i16x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i16; 16]>> for i16x16[src]

impl BitAndAssign<i16> for i16x16[src]

impl BitOr<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i16> for i16x16[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i16; 16]>> for i16x16[src]

impl BitOrAssign<i16> for i16x16[src]

impl BitXor<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i16> for i16x16[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i16; 16]>> for i16x16[src]

impl BitXorAssign<i16> for i16x16[src]

impl Debug for i16x16[src]

impl Default for i16x16[src]

impl Div<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i16> for i16x16[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i16; 16]>> for i16x16[src]

impl DivAssign<i16> for i16x16[src]

impl Eq for i16x16[src]

impl From<[i16; 16]> for i16x16[src]

impl From<Simd<[i8; 16]>> for i16x16[src]

impl From<Simd<[u8; 16]>> for i16x16[src]

impl FromBits<Simd<[f32; 8]>> for i16x16[src]

impl FromBits<Simd<[f64; 4]>> for i16x16[src]

impl FromBits<Simd<[i128; 2]>> for i16x16[src]

impl FromBits<Simd<[i32; 8]>> for i16x16[src]

impl FromBits<Simd<[i64; 4]>> for i16x16[src]

impl FromBits<Simd<[i8; 32]>> for i16x16[src]

impl FromBits<Simd<[m128; 2]>> for i16x16[src]

impl FromBits<Simd<[m16; 16]>> for i16x16[src]

impl FromBits<Simd<[m32; 8]>> for i16x16[src]

impl FromBits<Simd<[m64; 4]>> for i16x16[src]

impl FromBits<Simd<[m8; 32]>> for i16x16[src]

impl FromBits<Simd<[u128; 2]>> for i16x16[src]

impl FromBits<Simd<[u16; 16]>> for i16x16[src]

impl FromBits<Simd<[u32; 8]>> for i16x16[src]

impl FromBits<Simd<[u64; 4]>> for i16x16[src]

impl FromBits<Simd<[u8; 32]>> for i16x16[src]

impl FromBits<__m256> for i16x16[src]

impl FromBits<__m256d> for i16x16[src]

impl FromBits<__m256i> for i16x16[src]

impl FromCast<Simd<[f32; 16]>> for i16x16[src]

impl FromCast<Simd<[i32; 16]>> for i16x16[src]

impl FromCast<Simd<[i8; 16]>> for i16x16[src]

impl FromCast<Simd<[m16; 16]>> for i16x16[src]

impl FromCast<Simd<[m32; 16]>> for i16x16[src]

impl FromCast<Simd<[m8; 16]>> for i16x16[src]

impl FromCast<Simd<[u16; 16]>> for i16x16[src]

impl FromCast<Simd<[u32; 16]>> for i16x16[src]

impl FromCast<Simd<[u8; 16]>> for i16x16[src]

impl Hash for i16x16[src]

impl LowerHex for i16x16[src]

impl Mul<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i16> for i16x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i16; 16]>> for i16x16[src]

impl MulAssign<i16> for i16x16[src]

impl Neg for i16x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i16x16[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i16x16[src]

impl PartialEq<Simd<[i16; 16]>> for i16x16[src]

impl<'a> Product<&'a Simd<[i16; 16]>> for i16x16[src]

impl Product<Simd<[i16; 16]>> for i16x16[src]

impl Rem<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i16> for i16x16[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i16; 16]>> for i16x16[src]

impl RemAssign<i16> for i16x16[src]

impl Shl<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i16x16[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i16; 16]>> for i16x16[src]

impl ShlAssign<u32> for i16x16[src]

impl Shr<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i16x16[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i16; 16]>> for i16x16[src]

impl ShrAssign<u32> for i16x16[src]

impl Simd for i16x16[src]

type Element = i16

Element type of the SIMD vector

+

type LanesType = [u32; 16]

The type: [u32; Self::N].

+

impl Sub<Simd<[i16; 16]>> for i16x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i16> for i16x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i16; 16]>> for i16x16[src]

impl SubAssign<i16> for i16x16[src]

impl<'a> Sum<&'a Simd<[i16; 16]>> for i16x16[src]

impl Sum<Simd<[i16; 16]>> for i16x16[src]

impl UpperHex for i16x16[src]

packed_simd::i16x2 - Rust

[][src]Type Definition packed_simd::i16x2

type i16x2 = Simd<[i16; 2]>;

A 32-bit vector with 2 i16 lanes.

+

Implementations

impl i16x2[src]

pub const fn new(x0: i16, x1: i16) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i16) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i16[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i16[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl i16x2[src]

pub fn rotate_left(self, n: i16x2) -> i16x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: i16x2) -> i16x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl i16x2[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl i16x2[src]

pub fn wrapping_sum(self) -> i16[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

+

pub fn wrapping_product(self) -> i16[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^b, where b is the number of bits of the element type.

+

impl i16x2[src]

pub fn max_element(self) -> i16[src]

Largest vector element value.

+

pub fn min_element(self) -> i16[src]

Smallest vector element value.

+

impl i16x2[src]

pub fn and(self) -> i16[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i16[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i16[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

+

impl i16x2[src]

pub fn from_slice_aligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i16x2[src]

pub fn write_to_slice_aligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i16x2[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl i16x2[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl i16x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i16x2[src]

pub fn eq(self, other: Self) -> m16x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x2[src]

Lane-wise greater-than-or-equals comparison.

+

impl i16x2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i16x2>[src]

Returns a wrapper that implements PartialOrd.

+

impl i16x2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i16x2>[src]

Returns a wrapper that implements Ord.

+

impl i16x2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i16> for i16x2[src]

type Output = Self

The resulting type after applying the + operator.
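The scalar impls apply the operation between the scalar and every lane, as if the scalar had been splat into a vector first. A sketch (assuming the packed_simd crate):

use packed_simd::i16x2;

assert_eq!(i16x2::new(1, 2) + 10, i16x2::new(11, 12));

// The assigning forms work the same way.
let mut w = i16x2::splat(1);
w += 3;
assert_eq!(w, i16x2::new(4, 4));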

+

impl AddAssign<Simd<[i16; 2]>> for i16x2[src]

impl AddAssign<i16> for i16x2[src]

impl Binary for i16x2[src]

impl BitAnd<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i16> for i16x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i16; 2]>> for i16x2[src]

impl BitAndAssign<i16> for i16x2[src]

impl BitOr<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i16> for i16x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i16; 2]>> for i16x2[src]

impl BitOrAssign<i16> for i16x2[src]

impl BitXor<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i16> for i16x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i16; 2]>> for i16x2[src]

impl BitXorAssign<i16> for i16x2[src]

impl Debug for i16x2[src]

impl Default for i16x2[src]

impl Div<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i16> for i16x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i16; 2]>> for i16x2[src]

impl DivAssign<i16> for i16x2[src]

impl Eq for i16x2[src]

impl From<[i16; 2]> for i16x2[src]

impl From<Simd<[i8; 2]>> for i16x2[src]

impl From<Simd<[u8; 2]>> for i16x2[src]

impl FromBits<Simd<[i8; 4]>> for i16x2[src]

impl FromBits<Simd<[m16; 2]>> for i16x2[src]

impl FromBits<Simd<[m8; 4]>> for i16x2[src]

impl FromBits<Simd<[u16; 2]>> for i16x2[src]

impl FromBits<Simd<[u8; 4]>> for i16x2[src]

impl FromCast<Simd<[f32; 2]>> for i16x2[src]

impl FromCast<Simd<[f64; 2]>> for i16x2[src]

impl FromCast<Simd<[i128; 2]>> for i16x2[src]

impl FromCast<Simd<[i32; 2]>> for i16x2[src]

impl FromCast<Simd<[i64; 2]>> for i16x2[src]

impl FromCast<Simd<[i8; 2]>> for i16x2[src]

impl FromCast<Simd<[isize; 2]>> for i16x2[src]

impl FromCast<Simd<[m128; 2]>> for i16x2[src]

impl FromCast<Simd<[m16; 2]>> for i16x2[src]

impl FromCast<Simd<[m32; 2]>> for i16x2[src]

impl FromCast<Simd<[m64; 2]>> for i16x2[src]

impl FromCast<Simd<[m8; 2]>> for i16x2[src]

impl FromCast<Simd<[msize; 2]>> for i16x2[src]

impl FromCast<Simd<[u128; 2]>> for i16x2[src]

impl FromCast<Simd<[u16; 2]>> for i16x2[src]

impl FromCast<Simd<[u32; 2]>> for i16x2[src]

impl FromCast<Simd<[u64; 2]>> for i16x2[src]

impl FromCast<Simd<[u8; 2]>> for i16x2[src]

impl FromCast<Simd<[usize; 2]>> for i16x2[src]

impl Hash for i16x2[src]

impl LowerHex for i16x2[src]

impl Mul<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i16> for i16x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i16; 2]>> for i16x2[src]

impl MulAssign<i16> for i16x2[src]

impl Neg for i16x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i16x2[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i16x2[src]

impl PartialEq<Simd<[i16; 2]>> for i16x2[src]

impl<'a> Product<&'a Simd<[i16; 2]>> for i16x2[src]

impl Product<Simd<[i16; 2]>> for i16x2[src]

impl Rem<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i16> for i16x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i16; 2]>> for i16x2[src]

impl RemAssign<i16> for i16x2[src]

impl Shl<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i16x2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i16; 2]>> for i16x2[src]

impl ShlAssign<u32> for i16x2[src]

impl Shr<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i16x2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i16; 2]>> for i16x2[src]

impl ShrAssign<u32> for i16x2[src]

impl Simd for i16x2[src]

type Element = i16

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+

impl Sub<Simd<[i16; 2]>> for i16x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i16> for i16x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i16; 2]>> for i16x2[src]

impl SubAssign<i16> for i16x2[src]

impl<'a> Sum<&'a Simd<[i16; 2]>> for i16x2[src]

impl Sum<Simd<[i16; 2]>> for i16x2[src]

impl UpperHex for i16x2[src]

\ No newline at end of file
diff --git a/packed_simd/type.i16x32.html b/packed_simd/type.i16x32.html
new file mode 100644
index 000000000..5b6465588
--- /dev/null
+++ b/packed_simd/type.i16x32.html
@@ -0,0 +1,235 @@
packed_simd::i16x32 - Rust

Type Definition packed_simd::i16x32

type i16x32 = Simd<[i16; 32]>;

A 512-bit vector with 32 i16 lanes.

+

Implementations

impl i16x32[src]

pub const fn new(
    x0: i16,
    x1: i16,
    x2: i16,
    x3: i16,
    x4: i16,
    x5: i16,
    x6: i16,
    x7: i16,
    x8: i16,
    x9: i16,
    x10: i16,
    x11: i16,
    x12: i16,
    x13: i16,
    x14: i16,
    x15: i16,
    x16: i16,
    x17: i16,
    x18: i16,
    x19: i16,
    x20: i16,
    x21: i16,
    x22: i16,
    x23: i16,
    x24: i16,
    x25: i16,
    x26: i16,
    x27: i16,
    x28: i16,
    x29: i16,
    x30: i16,
    x31: i16
) -> Self
[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i16) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i16[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i16[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+
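Example (a hedged sketch; assumes packed_simd is in scope). Both extract and replace return values and leave the receiver untouched:

use packed_simd::i16x32;

fn main() {
    let v = i16x32::splat(0);
    let w = v.replace(31, 7); // a new vector; v itself is unchanged
    assert_eq!(w.extract(31), 7);
    assert_eq!(v.extract(31), 0);
}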

impl i16x32[src]

pub fn rotate_left(self, n: i16x32) -> i16x32[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i16x32) -> i16x32[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+
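Example (illustrative sketch, assuming packed_simd): the bit rotated out of one side of each lane reappears on the other side:

use packed_simd::i16x32;

fn main() {
    let x = i16x32::splat(0b11);
    let n = i16x32::splat(15);
    // 0b11 rotated left by 15 wraps the top bit around to bit 0.
    let rotated = i16x32::splat(0b1000_0000_0000_0001u16 as i16);
    assert_eq!(x.rotate_left(n), rotated);
    assert_eq!(rotated.rotate_right(n), x);
}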

impl i16x32[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl i16x32[src]

pub fn wrapping_sum(self) -> i16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: what is returned is the mathematical result modulo 2^16, the width of the i16 element type.

pub fn wrapping_product(self) -> i16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: what is returned is the mathematical result modulo 2^16, the width of the i16 element type.

+
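Example (a sketch assuming packed_simd; the exact wrap-around value follows from 16-bit modular arithmetic):

use packed_simd::i16x32;

fn main() {
    assert_eq!(i16x32::splat(1).wrapping_sum(), 32);
    // 32 * 32767 = 1_048_544, which is congruent to -32 modulo 2^16.
    assert_eq!(i16x32::splat(i16::MAX).wrapping_sum(), -32);
}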

impl i16x32[src]

pub fn max_element(self) -> i16[src]

Largest vector element value.

+

pub fn min_element(self) -> i16[src]

Smallest vector element value.

+

impl i16x32[src]

pub fn and(self) -> i16[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i16[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i16[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

+
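Example (sketch, assuming packed_simd):

use packed_simd::i16x32;

fn main() {
    let v = i16x32::splat(0b1010);
    assert_eq!(v.and(), 0b1010);
    assert_eq!(v.or(), 0b1010);
    // xor over an even number of identical lanes cancels out to zero.
    assert_eq!(v.xor(), 0);
}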

impl i16x32[src]

pub fn from_slice_aligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i16x32[src]

pub fn write_to_slice_aligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
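Example (sketch, assuming packed_simd): a slice round-trip through the unaligned variants, which only require slice.len() >= Self::lanes():

use packed_simd::i16x32;

fn main() {
    let data = [7i16; 32];
    let v = i16x32::from_slice_unaligned(&data);
    let mut out = [0i16; 32];
    v.write_to_slice_unaligned(&mut out);
    assert_eq!(out, data);
}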

impl i16x32[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

+
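Example (sketch, assuming packed_simd, and assuming swap_bytes reverses the bytes within each lane, as the endianness conversions above require):

use packed_simd::i16x32;

fn main() {
    let v = i16x32::splat(0x0102);
    // Converting to a fixed endianness and back is the identity.
    assert_eq!(i16x32::from_le(v.to_le()), v);
    // Each lane's two bytes are swapped: 0x0102 -> 0x0201.
    assert_eq!(v.swap_bytes(), i16x32::splat(0x0201));
}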

impl i16x32[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl i16x32[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i16x32[src]

pub fn eq(self, other: Self) -> m16x32[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x32[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x32[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x32[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x32[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x32[src]

Lane-wise greater-than-or-equals comparison.

+
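Example (sketch; assumes packed_simd, and that the m16x32 mask type exposes all() and select(), which this page does not itself document):

use packed_simd::i16x32;

fn main() {
    let a = i16x32::splat(1);
    let b = i16x32::splat(2);
    let m = a.lt(b); // an m16x32 mask with every lane set
    assert!(m.all());
    // A mask can pick lanes from either operand.
    assert_eq!(m.select(a, b), a);
}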

impl i16x32[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i16x32>[src]

Returns a wrapper that implements PartialOrd.

+

impl i16x32[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i16x32>[src]

Returns a wrapper that implements Ord.

+

impl i16x32[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i16> for i16x32[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i16; 32]>> for i16x32[src]

impl AddAssign<i16> for i16x32[src]

impl Binary for i16x32[src]

impl BitAnd<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i16> for i16x32[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i16; 32]>> for i16x32[src]

impl BitAndAssign<i16> for i16x32[src]

impl BitOr<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i16> for i16x32[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i16; 32]>> for i16x32[src]

impl BitOrAssign<i16> for i16x32[src]

impl BitXor<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i16> for i16x32[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i16; 32]>> for i16x32[src]

impl BitXorAssign<i16> for i16x32[src]

impl Debug for i16x32[src]

impl Default for i16x32[src]

impl Div<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i16> for i16x32[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i16; 32]>> for i16x32[src]

impl DivAssign<i16> for i16x32[src]

impl Eq for i16x32[src]

impl From<[i16; 32]> for i16x32[src]

impl From<Simd<[i8; 32]>> for i16x32[src]

impl From<Simd<[u8; 32]>> for i16x32[src]

impl FromBits<Simd<[f32; 16]>> for i16x32[src]

impl FromBits<Simd<[f64; 8]>> for i16x32[src]

impl FromBits<Simd<[i128; 4]>> for i16x32[src]

impl FromBits<Simd<[i32; 16]>> for i16x32[src]

impl FromBits<Simd<[i64; 8]>> for i16x32[src]

impl FromBits<Simd<[i8; 64]>> for i16x32[src]

impl FromBits<Simd<[m128; 4]>> for i16x32[src]

impl FromBits<Simd<[m16; 32]>> for i16x32[src]

impl FromBits<Simd<[m32; 16]>> for i16x32[src]

impl FromBits<Simd<[m64; 8]>> for i16x32[src]

impl FromBits<Simd<[m8; 64]>> for i16x32[src]

impl FromBits<Simd<[u128; 4]>> for i16x32[src]

impl FromBits<Simd<[u16; 32]>> for i16x32[src]

impl FromBits<Simd<[u32; 16]>> for i16x32[src]

impl FromBits<Simd<[u64; 8]>> for i16x32[src]

impl FromBits<Simd<[u8; 64]>> for i16x32[src]

impl FromCast<Simd<[i8; 32]>> for i16x32[src]

impl FromCast<Simd<[m16; 32]>> for i16x32[src]

impl FromCast<Simd<[m8; 32]>> for i16x32[src]

impl FromCast<Simd<[u16; 32]>> for i16x32[src]

impl FromCast<Simd<[u8; 32]>> for i16x32[src]

impl Hash for i16x32[src]

impl LowerHex for i16x32[src]

impl Mul<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i16> for i16x32[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i16; 32]>> for i16x32[src]

impl MulAssign<i16> for i16x32[src]

impl Neg for i16x32[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i16x32[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i16x32[src]

impl PartialEq<Simd<[i16; 32]>> for i16x32[src]

impl<'a> Product<&'a Simd<[i16; 32]>> for i16x32[src]

impl Product<Simd<[i16; 32]>> for i16x32[src]

impl Rem<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i16> for i16x32[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i16; 32]>> for i16x32[src]

impl RemAssign<i16> for i16x32[src]

impl Shl<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i16x32[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i16; 32]>> for i16x32[src]

impl ShlAssign<u32> for i16x32[src]

impl Shr<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i16x32[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i16; 32]>> for i16x32[src]

impl ShrAssign<u32> for i16x32[src]

impl Simd for i16x32[src]

type Element = i16

Element type of the SIMD vector

+

type LanesType = [u32; 32]

The type: [u32; Self::N].

+

impl Sub<Simd<[i16; 32]>> for i16x32[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i16> for i16x32[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i16; 32]>> for i16x32[src]

impl SubAssign<i16> for i16x32[src]

impl<'a> Sum<&'a Simd<[i16; 32]>> for i16x32[src]

impl Sum<Simd<[i16; 32]>> for i16x32[src]

impl UpperHex for i16x32[src]

\ No newline at end of file
diff --git a/packed_simd/type.i16x4.html b/packed_simd/type.i16x4.html
new file mode 100644
index 000000000..81a87fecf
--- /dev/null
+++ b/packed_simd/type.i16x4.html
@@ -0,0 +1,243 @@
packed_simd::i16x4 - Rust

Type Definition packed_simd::i16x4

type i16x4 = Simd<[i16; 4]>;

A 64-bit vector with 4 i16 lanes.

+

Implementations

impl i16x4[src]

pub const fn new(x0: i16, x1: i16, x2: i16, x3: i16) -> Self[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i16) -> Self[src]

Constructs a new instance with each element initialized to value.

+
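Example (sketch, assuming packed_simd):

use packed_simd::i16x4;

fn main() {
    assert_eq!(i16x4::lanes(), 4);
    let v = i16x4::new(1, 2, 3, 4);
    assert_eq!(v, i16x4::from([1, 2, 3, 4])); // via From<[i16; 4]>
    assert_eq!(i16x4::splat(9).extract(3), 9);
}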

pub fn extract(self, index: usize) -> i16[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i16[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl i16x4[src]

pub fn rotate_left(self, n: i16x4) -> i16x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i16x4) -> i16x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl i16x4[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

+
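Example (sketch, assuming packed_simd): min and max are lane-wise, not lexicographic:

use packed_simd::i16x4;

fn main() {
    let a = i16x4::new(1, 9, 3, 7);
    let b = i16x4::new(4, 2, 8, 7);
    assert_eq!(a.min(b), i16x4::new(1, 2, 3, 7));
    assert_eq!(a.max(b), i16x4::new(4, 9, 8, 7));
}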

impl i16x4[src]

pub fn wrapping_sum(self) -> i16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: what is returned is the mathematical result modulo 2^16, the width of the i16 element type.

pub fn wrapping_product(self) -> i16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: what is returned is the mathematical result modulo 2^16, the width of the i16 element type.

+

impl i16x4[src]

pub fn max_element(self) -> i16[src]

Largest vector element value.

+

pub fn min_element(self) -> i16[src]

Smallest vector element value.

+
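Example (sketch, assuming packed_simd):

use packed_simd::i16x4;

fn main() {
    let v = i16x4::new(3, -1, 7, 0);
    assert_eq!(v.max_element(), 7);
    assert_eq!(v.min_element(), -1);
}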

impl i16x4[src]

pub fn and(self) -> i16[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i16[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i16[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

+

impl i16x4[src]

pub fn from_slice_aligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i16x4[src]

pub fn write_to_slice_aligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i16x4[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl i16x4[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl i16x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i16x4[src]

pub fn eq(self, other: Self) -> m16x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x4[src]

Lane-wise greater-than-or-equals comparison.

+

impl i16x4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i16x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl i16x4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i16x4>[src]

Returns a wrapper that implements Ord.

+
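Example (sketch, assuming packed_simd): the wrappers compare vectors lexicographically, so the first differing lane decides:

use packed_simd::i16x4;

fn main() {
    let a = i16x4::new(1, 2, 3, 4);
    let b = i16x4::new(1, 2, 4, 0);
    // Lane 2 is the first difference: 3 < 4, so a orders before b.
    assert!(a.partial_lex_ord() < b.partial_lex_ord());
    assert!(a.lex_ord() < b.lex_ord());
}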

impl i16x4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i16> for i16x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i16; 4]>> for i16x4[src]

impl AddAssign<i16> for i16x4[src]

impl Binary for i16x4[src]

impl BitAnd<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i16> for i16x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i16; 4]>> for i16x4[src]

impl BitAndAssign<i16> for i16x4[src]

impl BitOr<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i16> for i16x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i16; 4]>> for i16x4[src]

impl BitOrAssign<i16> for i16x4[src]

impl BitXor<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i16> for i16x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i16; 4]>> for i16x4[src]

impl BitXorAssign<i16> for i16x4[src]

impl Debug for i16x4[src]

impl Default for i16x4[src]

impl Div<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i16> for i16x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i16; 4]>> for i16x4[src]

impl DivAssign<i16> for i16x4[src]

impl Eq for i16x4[src]

impl From<[i16; 4]> for i16x4[src]

impl From<Simd<[i8; 4]>> for i16x4[src]

impl From<Simd<[u8; 4]>> for i16x4[src]

impl FromBits<Simd<[f32; 2]>> for i16x4[src]

impl FromBits<Simd<[i32; 2]>> for i16x4[src]

impl FromBits<Simd<[i8; 8]>> for i16x4[src]

impl FromBits<Simd<[m16; 4]>> for i16x4[src]

impl FromBits<Simd<[m32; 2]>> for i16x4[src]

impl FromBits<Simd<[m8; 8]>> for i16x4[src]

impl FromBits<Simd<[u16; 4]>> for i16x4[src]

impl FromBits<Simd<[u32; 2]>> for i16x4[src]

impl FromBits<Simd<[u8; 8]>> for i16x4[src]

impl FromBits<__m64> for i16x4[src]

impl FromCast<Simd<[f32; 4]>> for i16x4[src]

impl FromCast<Simd<[f64; 4]>> for i16x4[src]

impl FromCast<Simd<[i128; 4]>> for i16x4[src]

impl FromCast<Simd<[i32; 4]>> for i16x4[src]

impl FromCast<Simd<[i64; 4]>> for i16x4[src]

impl FromCast<Simd<[i8; 4]>> for i16x4[src]

impl FromCast<Simd<[isize; 4]>> for i16x4[src]

impl FromCast<Simd<[m128; 4]>> for i16x4[src]

impl FromCast<Simd<[m16; 4]>> for i16x4[src]

impl FromCast<Simd<[m32; 4]>> for i16x4[src]

impl FromCast<Simd<[m64; 4]>> for i16x4[src]

impl FromCast<Simd<[m8; 4]>> for i16x4[src]

impl FromCast<Simd<[msize; 4]>> for i16x4[src]

impl FromCast<Simd<[u128; 4]>> for i16x4[src]

impl FromCast<Simd<[u16; 4]>> for i16x4[src]

impl FromCast<Simd<[u32; 4]>> for i16x4[src]

impl FromCast<Simd<[u64; 4]>> for i16x4[src]

impl FromCast<Simd<[u8; 4]>> for i16x4[src]

impl FromCast<Simd<[usize; 4]>> for i16x4[src]

impl Hash for i16x4[src]

impl LowerHex for i16x4[src]

impl Mul<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i16> for i16x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i16; 4]>> for i16x4[src]

impl MulAssign<i16> for i16x4[src]

impl Neg for i16x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i16x4[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i16x4[src]

impl PartialEq<Simd<[i16; 4]>> for i16x4[src]

impl<'a> Product<&'a Simd<[i16; 4]>> for i16x4[src]

impl Product<Simd<[i16; 4]>> for i16x4[src]

impl Rem<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i16> for i16x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i16; 4]>> for i16x4[src]

impl RemAssign<i16> for i16x4[src]

impl Shl<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i16x4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i16; 4]>> for i16x4[src]

impl ShlAssign<u32> for i16x4[src]

impl Shr<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i16x4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i16; 4]>> for i16x4[src]

impl ShrAssign<u32> for i16x4[src]

impl Simd for i16x4[src]

type Element = i16

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+

impl Sub<Simd<[i16; 4]>> for i16x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i16> for i16x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i16; 4]>> for i16x4[src]

impl SubAssign<i16> for i16x4[src]

impl<'a> Sum<&'a Simd<[i16; 4]>> for i16x4[src]

impl Sum<Simd<[i16; 4]>> for i16x4[src]

impl UpperHex for i16x4[src]

\ No newline at end of file
diff --git a/packed_simd/type.i16x8.html b/packed_simd/type.i16x8.html
new file mode 100644
index 000000000..3a8366bab
--- /dev/null
+++ b/packed_simd/type.i16x8.html
@@ -0,0 +1,249 @@
packed_simd::i16x8 - Rust

Type Definition packed_simd::i16x8

type i16x8 = Simd<[i16; 8]>;

A 128-bit vector with 8 i16 lanes.

+

Implementations

impl i16x8[src]

pub const fn new(
    x0: i16,
    x1: i16,
    x2: i16,
    x3: i16,
    x4: i16,
    x5: i16,
    x6: i16,
    x7: i16
) -> Self
[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i16) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i16[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i16[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl i16x8[src]

pub fn rotate_left(self, n: i16x8) -> i16x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i16x8) -> i16x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl i16x8[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl i16x8[src]

pub fn wrapping_sum(self) -> i16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: what is returned is the mathematical result modulo 2^16, the width of the i16 element type.

pub fn wrapping_product(self) -> i16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: what is returned is the mathematical result modulo 2^16, the width of the i16 element type.

+

impl i16x8[src]

pub fn max_element(self) -> i16[src]

Largest vector element value.

+

pub fn min_element(self) -> i16[src]

Smallest vector element value.

+

impl i16x8[src]

pub fn and(self) -> i16[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i16[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i16[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

+

impl i16x8[src]

pub fn from_slice_aligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i16x8[src]

pub fn write_to_slice_aligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i16x8[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl i16x8[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+
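Example (sketch, assuming packed_simd): all four bit counts are computed per lane and returned as a vector:

use packed_simd::i16x8;

fn main() {
    let v = i16x8::splat(0b110);
    assert_eq!(v.count_ones(), i16x8::splat(2));
    assert_eq!(v.trailing_zeros(), i16x8::splat(1));
    assert_eq!(v.leading_zeros(), i16x8::splat(13)); // 16-bit lanes
}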

impl i16x8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i16x8[src]

pub fn eq(self, other: Self) -> m16x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl i16x8[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i16x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl i16x8[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i16x8>[src]

Returns a wrapper that implements Ord.

+

impl i16x8[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i16> for i16x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i16; 8]>> for i16x8[src]

impl AddAssign<i16> for i16x8[src]

impl Binary for i16x8[src]

impl BitAnd<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i16> for i16x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i16; 8]>> for i16x8[src]

impl BitAndAssign<i16> for i16x8[src]

impl BitOr<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i16> for i16x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i16; 8]>> for i16x8[src]

impl BitOrAssign<i16> for i16x8[src]

impl BitXor<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i16> for i16x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i16; 8]>> for i16x8[src]

impl BitXorAssign<i16> for i16x8[src]

impl Debug for i16x8[src]

impl Default for i16x8[src]

impl Div<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i16> for i16x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i16; 8]>> for i16x8[src]

impl DivAssign<i16> for i16x8[src]

impl Eq for i16x8[src]

impl From<[i16; 8]> for i16x8[src]

impl From<Simd<[i8; 8]>> for i16x8[src]

impl From<Simd<[u8; 8]>> for i16x8[src]

impl FromBits<Simd<[f32; 4]>> for i16x8[src]

impl FromBits<Simd<[f64; 2]>> for i16x8[src]

impl FromBits<Simd<[i128; 1]>> for i16x8[src]

impl FromBits<Simd<[i32; 4]>> for i16x8[src]

impl FromBits<Simd<[i64; 2]>> for i16x8[src]

impl FromBits<Simd<[i8; 16]>> for i16x8[src]

impl FromBits<Simd<[m128; 1]>> for i16x8[src]

impl FromBits<Simd<[m16; 8]>> for i16x8[src]

impl FromBits<Simd<[m32; 4]>> for i16x8[src]

impl FromBits<Simd<[m64; 2]>> for i16x8[src]

impl FromBits<Simd<[m8; 16]>> for i16x8[src]

impl FromBits<Simd<[u128; 1]>> for i16x8[src]

impl FromBits<Simd<[u16; 8]>> for i16x8[src]

impl FromBits<Simd<[u32; 4]>> for i16x8[src]

impl FromBits<Simd<[u64; 2]>> for i16x8[src]

impl FromBits<Simd<[u8; 16]>> for i16x8[src]

impl FromBits<__m128> for i16x8[src]

impl FromBits<__m128d> for i16x8[src]

impl FromBits<__m128i> for i16x8[src]

impl FromCast<Simd<[f32; 8]>> for i16x8[src]

impl FromCast<Simd<[f64; 8]>> for i16x8[src]

impl FromCast<Simd<[i32; 8]>> for i16x8[src]

impl FromCast<Simd<[i64; 8]>> for i16x8[src]

impl FromCast<Simd<[i8; 8]>> for i16x8[src]

impl FromCast<Simd<[isize; 8]>> for i16x8[src]

impl FromCast<Simd<[m16; 8]>> for i16x8[src]

impl FromCast<Simd<[m32; 8]>> for i16x8[src]

impl FromCast<Simd<[m64; 8]>> for i16x8[src]

impl FromCast<Simd<[m8; 8]>> for i16x8[src]

impl FromCast<Simd<[msize; 8]>> for i16x8[src]

impl FromCast<Simd<[u16; 8]>> for i16x8[src]

impl FromCast<Simd<[u32; 8]>> for i16x8[src]

impl FromCast<Simd<[u64; 8]>> for i16x8[src]

impl FromCast<Simd<[u8; 8]>> for i16x8[src]

impl FromCast<Simd<[usize; 8]>> for i16x8[src]

impl Hash for i16x8[src]

impl LowerHex for i16x8[src]

impl Mul<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i16> for i16x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i16; 8]>> for i16x8[src]

impl MulAssign<i16> for i16x8[src]

impl Neg for i16x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i16x8[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i16x8[src]

impl PartialEq<Simd<[i16; 8]>> for i16x8[src]

impl<'a> Product<&'a Simd<[i16; 8]>> for i16x8[src]

impl Product<Simd<[i16; 8]>> for i16x8[src]

impl Rem<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i16> for i16x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i16; 8]>> for i16x8[src]

impl RemAssign<i16> for i16x8[src]

impl Shl<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i16x8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i16; 8]>> for i16x8[src]

impl ShlAssign<u32> for i16x8[src]

impl Shr<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i16x8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i16; 8]>> for i16x8[src]

impl ShrAssign<u32> for i16x8[src]

impl Simd for i16x8[src]

type Element = i16

Element type of the SIMD vector

+

type LanesType = [u32; 8]

The type: [u32; Self::N].

+

impl Sub<Simd<[i16; 8]>> for i16x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i16> for i16x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i16; 8]>> for i16x8[src]

impl SubAssign<i16> for i16x8[src]

impl<'a> Sum<&'a Simd<[i16; 8]>> for i16x8[src]

impl Sum<Simd<[i16; 8]>> for i16x8[src]

impl UpperHex for i16x8[src]
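Example (sketch; assumes packed_simd and its Cast trait, which provides the value-level counterpart of the FromCast impls listed above):

use packed_simd::{i16x8, i8x8, Cast};

fn main() {
    let small = i8x8::splat(-3);
    let wide = i16x8::from(small); // lossless, via From<Simd<[i8; 8]>>
    assert_eq!(wide, i16x8::splat(-3));
    let back: i8x8 = wide.cast(); // lane-wise cast; may truncate in general
    assert_eq!(back, small);
}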

\ No newline at end of file
diff --git a/packed_simd/type.i32x16.html b/packed_simd/type.i32x16.html
new file mode 100644
index 000000000..db9d1fc1d
--- /dev/null
+++ b/packed_simd/type.i32x16.html
@@ -0,0 +1,241 @@
packed_simd::i32x16 - Rust

Type Definition packed_simd::i32x16

type i32x16 = Simd<[i32; 16]>;

A 512-bit vector with 16 i32 lanes.

+

Implementations

impl i32x16[src]

pub const fn new(
    x0: i32,
    x1: i32,
    x2: i32,
    x3: i32,
    x4: i32,
    x5: i32,
    x6: i32,
    x7: i32,
    x8: i32,
    x9: i32,
    x10: i32,
    x11: i32,
    x12: i32,
    x13: i32,
    x14: i32,
    x15: i32
) -> Self
[src]

Creates a new instance with the vector elements initialized to the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: i32) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl i32x16[src]

pub fn rotate_left(self, n: i32x16) -> i32x16[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i32x16) -> i32x16[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl i32x16[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl i32x16[src]

pub fn wrapping_sum(self) -> i32[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps around: what is returned is the mathematical result modulo 2^32, the width of the i32 element type.

pub fn wrapping_product(self) -> i32[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8 element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps around: what is returned is the mathematical result modulo 2^32, the width of the i32 element type.

+

impl i32x16[src]

pub fn max_element(self) -> i32[src]

Largest vector element value.

+

pub fn min_element(self) -> i32[src]

Smallest vector element value.

+

impl i32x16[src]

pub fn and(self) -> i32[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i32[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i32[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

+

impl i32x16[src]

pub fn from_slice_aligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i32x16[src]

pub fn write_to_slice_aligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i32x16[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl i32x16[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl i32x16[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i32x16[src]

pub fn eq(self, other: Self) -> m32x16[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x16[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x16[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x16[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x16[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x16[src]

Lane-wise greater-than-or-equals comparison.

+

impl i32x16[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i32x16>[src]

Returns a wrapper that implements PartialOrd.

+

impl i32x16[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i32x16>[src]

Returns a wrapper that implements Ord.

+

impl i32x16[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

+
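Example (sketch, assuming packed_simd and that lane 0 maps to bit 0 of the mask):

use packed_simd::i32x16;

fn main() {
    let v = i32x16::splat(0).replace(0, -1).replace(15, -5);
    // Lanes 0 and 15 are negative, so bits 0 and 15 of the u16 are set.
    assert_eq!(v.bitmask(), 0b1000_0000_0000_0001u16);
}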

Trait Implementations

impl Add<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i32> for i32x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i32; 16]>> for i32x16[src]

impl AddAssign<i32> for i32x16[src]

impl Binary for i32x16[src]

impl BitAnd<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i32> for i32x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i32; 16]>> for i32x16[src]

impl BitAndAssign<i32> for i32x16[src]

impl BitOr<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i32> for i32x16[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i32; 16]>> for i32x16[src]

impl BitOrAssign<i32> for i32x16[src]

impl BitXor<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i32> for i32x16[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i32; 16]>> for i32x16[src]

impl BitXorAssign<i32> for i32x16[src]

impl Debug for i32x16[src]

impl Default for i32x16[src]

impl Div<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i32> for i32x16[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i32; 16]>> for i32x16[src]

impl DivAssign<i32> for i32x16[src]

impl Eq for i32x16[src]

impl From<[i32; 16]> for i32x16[src]

impl From<Simd<[i16; 16]>> for i32x16[src]

impl From<Simd<[i8; 16]>> for i32x16[src]

impl From<Simd<[u16; 16]>> for i32x16[src]

impl From<Simd<[u8; 16]>> for i32x16[src]

impl FromBits<Simd<[f32; 16]>> for i32x16[src]

impl FromBits<Simd<[f64; 8]>> for i32x16[src]

impl FromBits<Simd<[i128; 4]>> for i32x16[src]

impl FromBits<Simd<[i16; 32]>> for i32x16[src]

impl FromBits<Simd<[i64; 8]>> for i32x16[src]

impl FromBits<Simd<[i8; 64]>> for i32x16[src]

impl FromBits<Simd<[m128; 4]>> for i32x16[src]

impl FromBits<Simd<[m16; 32]>> for i32x16[src]

impl FromBits<Simd<[m32; 16]>> for i32x16[src]

impl FromBits<Simd<[m64; 8]>> for i32x16[src]

impl FromBits<Simd<[m8; 64]>> for i32x16[src]

impl FromBits<Simd<[u128; 4]>> for i32x16[src]

impl FromBits<Simd<[u16; 32]>> for i32x16[src]

impl FromBits<Simd<[u32; 16]>> for i32x16[src]

impl FromBits<Simd<[u64; 8]>> for i32x16[src]

impl FromBits<Simd<[u8; 64]>> for i32x16[src]

impl FromCast<Simd<[f32; 16]>> for i32x16[src]

impl FromCast<Simd<[i16; 16]>> for i32x16[src]

impl FromCast<Simd<[i8; 16]>> for i32x16[src]

impl FromCast<Simd<[m16; 16]>> for i32x16[src]

impl FromCast<Simd<[m32; 16]>> for i32x16[src]

impl FromCast<Simd<[m8; 16]>> for i32x16[src]

impl FromCast<Simd<[u16; 16]>> for i32x16[src]

impl FromCast<Simd<[u32; 16]>> for i32x16[src]

impl FromCast<Simd<[u8; 16]>> for i32x16[src]

impl Hash for i32x16[src]

impl LowerHex for i32x16[src]

impl Mul<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i32> for i32x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i32; 16]>> for i32x16[src]

impl MulAssign<i32> for i32x16[src]

impl Neg for i32x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i32x16[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i32x16[src]

impl PartialEq<Simd<[i32; 16]>> for i32x16[src]

impl<'a> Product<&'a Simd<[i32; 16]>> for i32x16[src]

impl Product<Simd<[i32; 16]>> for i32x16[src]

impl Rem<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i32> for i32x16[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i32; 16]>> for i32x16[src]

impl RemAssign<i32> for i32x16[src]

impl Shl<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i32x16[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i32; 16]>> for i32x16[src]

impl ShlAssign<u32> for i32x16[src]

impl Shr<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i32x16[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i32; 16]>> for i32x16[src]

impl ShrAssign<u32> for i32x16[src]

impl Simd for i32x16[src]

type Element = i32

Element type of the SIMD vector

+

type LanesType = [u32; 16]

The type: [u32; Self::N].

+

impl Sub<Simd<[i32; 16]>> for i32x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i32> for i32x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i32; 16]>> for i32x16[src]

impl SubAssign<i32> for i32x16[src]

impl<'a> Sum<&'a Simd<[i32; 16]>> for i32x16[src]

impl Sum<Simd<[i32; 16]>> for i32x16[src]

impl UpperHex for i32x16[src]

\ No newline at end of file diff --git a/packed_simd/type.i32x2.html b/packed_simd/type.i32x2.html new file mode 100644 index 000000000..1782286c8 --- /dev/null +++ b/packed_simd/type.i32x2.html @@ -0,0 +1,245 @@ +packed_simd::i32x2 - Rust

[][src]Type Definition packed_simd::i32x2

type i32x2 = Simd<[i32; 2]>;

A 64-bit vector with 2 i32 lanes.

+

Implementations

impl i32x2[src]

pub const fn new(x0: i32, x1: i32) -> Self[src]

Creates a new instance with each vector element initialized +with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i32) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> i32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = + "replace does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`"]pub fn replace(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = + "replace_unchecked does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+
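A minimal sketch of the constructor and accessors above (assuming the crate is imported as packed_simd and built on the nightly toolchain it requires):

use packed_simd::i32x2;

fn main() {
    let v = i32x2::new(10, 20);
    assert_eq!(i32x2::lanes(), 2);
    assert_eq!(v.extract(1), 20);    // panics if the index is out of bounds
    let w = v.replace(0, 7);         // returns a new vector; v is unchanged
    assert_eq!(w.extract(0), 7);
    assert_eq!(v.extract(0), 10);
}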

impl i32x2[src]

pub fn rotate_left(self, n: i32x2) -> i32x2[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: i32x2) -> i32x2[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent +to slice::rotate_right.

+
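The difference from << called out above is visible at the sign bit: a shift discards the top bit, while a rotate wraps it back around. A short sketch (assuming packed_simd):

use packed_simd::i32x2;

fn main() {
    let v = i32x2::splat(1 << 31);                    // only the MSB set in each lane
    let n = i32x2::splat(1);
    assert_eq!(v.rotate_left(n), i32x2::splat(1));    // the MSB wraps around to bit 0
    assert_eq!(v << 1, i32x2::splat(0));              // a plain shift discards it
}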

impl i32x2[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+
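A sketch of the lane-wise selection described above (assuming packed_simd):

use packed_simd::i32x2;

fn main() {
    let a = i32x2::new(1, 5);
    let b = i32x2::new(3, 2);
    assert_eq!(a.min(b), i32x2::new(1, 2));  // per-lane minimum
    assert_eq!(a.max(b), i32x2::new(3, 5));  // per-lane maximum
}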

impl i32x2[src]

pub fn wrapping_sum(self) -> i32[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, it returns the mathematical result +modulo 2^32, the bit width of each i32 lane.

+

pub fn wrapping_product(self) -> i32[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, it returns the mathematical result +modulo 2^32, the bit width of each i32 lane.

+
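The wrap-around behavior described above, in a minimal sketch (assuming packed_simd):

use packed_simd::i32x2;

fn main() {
    let v = i32x2::new(i32::MAX, 1);
    // i32::MAX + 1 exceeds the 32-bit range and wraps to i32::MIN.
    assert_eq!(v.wrapping_sum(), i32::MIN);
    assert_eq!(i32x2::new(3, 4).wrapping_product(), 12);
}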

impl i32x2[src]

pub fn max_element(self) -> i32[src]

Largest vector element value.

+

pub fn min_element(self) -> i32[src]

Smallest vector element value.

+

impl i32x2[src]

pub fn and(self) -> i32[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> i32[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> i32[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+
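A sketch of these horizontal bit reductions (assuming packed_simd):

use packed_simd::i32x2;

fn main() {
    let v = i32x2::new(0b1100, 0b1010);
    assert_eq!(v.and(), 0b1000);  // bits set in every lane
    assert_eq!(v.or(),  0b1110);  // bits set in any lane
    assert_eq!(v.xor(), 0b0110);  // bits set in an odd number of lanes
}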

impl i32x2[src]

pub fn from_slice_aligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i32x2[src]

pub fn write_to_slice_aligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i32x2[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl i32x2[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl i32x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i32x2[src]

pub fn eq(self, other: Self) -> m32x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x2[src]

Lane-wise greater-than-or-equals comparison.

+

impl i32x2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i32x2>[src]

Returns a wrapper that implements PartialOrd.

+

impl i32x2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i32x2>[src]

Returns a wrapper that implements Ord.

+

impl i32x2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not +correspond to any vector lane are cleared.

+
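Since i32x2 has two lanes, only the two low bits of the returned u8 can ever be set. A sketch (assuming packed_simd):

use packed_simd::i32x2;

fn main() {
    let v = i32x2::new(-1, 1);       // lane 0 is negative, so its MSB is set
    assert_eq!(v.bitmask(), 0b01);   // bit i mirrors the MSB of lane i
}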

Trait Implementations

impl Add<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i32> for i32x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i32; 2]>> for i32x2[src]

impl AddAssign<i32> for i32x2[src]

impl Binary for i32x2[src]

impl BitAnd<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i32> for i32x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i32; 2]>> for i32x2[src]

impl BitAndAssign<i32> for i32x2[src]

impl BitOr<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i32> for i32x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i32; 2]>> for i32x2[src]

impl BitOrAssign<i32> for i32x2[src]

impl BitXor<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i32> for i32x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i32; 2]>> for i32x2[src]

impl BitXorAssign<i32> for i32x2[src]

impl Debug for i32x2[src]

impl Default for i32x2[src]

impl Div<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i32> for i32x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i32; 2]>> for i32x2[src]

impl DivAssign<i32> for i32x2[src]

impl Eq for i32x2[src]

impl From<[i32; 2]> for i32x2[src]

impl From<Simd<[i16; 2]>> for i32x2[src]

impl From<Simd<[i8; 2]>> for i32x2[src]

impl From<Simd<[u16; 2]>> for i32x2[src]

impl From<Simd<[u8; 2]>> for i32x2[src]

impl FromBits<Simd<[f32; 2]>> for i32x2[src]

impl FromBits<Simd<[i16; 4]>> for i32x2[src]

impl FromBits<Simd<[i8; 8]>> for i32x2[src]

impl FromBits<Simd<[m16; 4]>> for i32x2[src]

impl FromBits<Simd<[m32; 2]>> for i32x2[src]

impl FromBits<Simd<[m8; 8]>> for i32x2[src]

impl FromBits<Simd<[u16; 4]>> for i32x2[src]

impl FromBits<Simd<[u32; 2]>> for i32x2[src]

impl FromBits<Simd<[u8; 8]>> for i32x2[src]

impl FromBits<__m64> for i32x2[src]

impl FromCast<Simd<[f32; 2]>> for i32x2[src]

impl FromCast<Simd<[f64; 2]>> for i32x2[src]

impl FromCast<Simd<[i128; 2]>> for i32x2[src]

impl FromCast<Simd<[i16; 2]>> for i32x2[src]

impl FromCast<Simd<[i64; 2]>> for i32x2[src]

impl FromCast<Simd<[i8; 2]>> for i32x2[src]

impl FromCast<Simd<[isize; 2]>> for i32x2[src]

impl FromCast<Simd<[m128; 2]>> for i32x2[src]

impl FromCast<Simd<[m16; 2]>> for i32x2[src]

impl FromCast<Simd<[m32; 2]>> for i32x2[src]

impl FromCast<Simd<[m64; 2]>> for i32x2[src]

impl FromCast<Simd<[m8; 2]>> for i32x2[src]

impl FromCast<Simd<[msize; 2]>> for i32x2[src]

impl FromCast<Simd<[u128; 2]>> for i32x2[src]

impl FromCast<Simd<[u16; 2]>> for i32x2[src]

impl FromCast<Simd<[u32; 2]>> for i32x2[src]

impl FromCast<Simd<[u64; 2]>> for i32x2[src]

impl FromCast<Simd<[u8; 2]>> for i32x2[src]

impl FromCast<Simd<[usize; 2]>> for i32x2[src]

impl Hash for i32x2[src]

impl LowerHex for i32x2[src]

impl Mul<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i32> for i32x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i32; 2]>> for i32x2[src]

impl MulAssign<i32> for i32x2[src]

impl Neg for i32x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i32x2[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i32x2[src]

impl PartialEq<Simd<[i32; 2]>> for i32x2[src]

impl<'a> Product<&'a Simd<[i32; 2]>> for i32x2[src]

impl Product<Simd<[i32; 2]>> for i32x2[src]

impl Rem<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i32> for i32x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i32; 2]>> for i32x2[src]

impl RemAssign<i32> for i32x2[src]

impl Shl<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i32x2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i32; 2]>> for i32x2[src]

impl ShlAssign<u32> for i32x2[src]

impl Shr<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i32x2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i32; 2]>> for i32x2[src]

impl ShrAssign<u32> for i32x2[src]

impl Simd for i32x2[src]

type Element = i32

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+

impl Sub<Simd<[i32; 2]>> for i32x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i32> for i32x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i32; 2]>> for i32x2[src]

impl SubAssign<i32> for i32x2[src]

impl<'a> Sum<&'a Simd<[i32; 2]>> for i32x2[src]

impl Sum<Simd<[i32; 2]>> for i32x2[src]

impl UpperHex for i32x2[src]

\ No newline at end of file diff --git a/packed_simd/type.i32x4.html b/packed_simd/type.i32x4.html new file mode 100644 index 000000000..ebe8c7563 --- /dev/null +++ b/packed_simd/type.i32x4.html @@ -0,0 +1,254 @@ +packed_simd::i32x4 - Rust

[][src]Type Definition packed_simd::i32x4

type i32x4 = Simd<[i32; 4]>;

A 128-bit vector with 4 i32 lanes.

+

Implementations

impl i32x4[src]

pub const fn new(x0: i32, x1: i32, x2: i32, x3: i32) -> Self[src]

Creates a new instance with each vector element initialized +with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i32) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> i32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = + "replace does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`"]pub fn replace(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = + "replace_unchecked does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl i32x4[src]

pub fn rotate_left(self, n: i32x4) -> i32x4[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: i32x4) -> i32x4[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent +to slice::rotate_right.

+

impl i32x4[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl i32x4[src]

pub fn wrapping_sum(self) -> i32[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, it returns the mathematical result +modulo 2^32, the bit width of each i32 lane.

+

pub fn wrapping_product(self) -> i32[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, it returns the mathematical result +modulo 2^32, the bit width of each i32 lane.

+

impl i32x4[src]

pub fn max_element(self) -> i32[src]

Largest vector element value.

+

pub fn min_element(self) -> i32[src]

Smallest vector element value.

+

impl i32x4[src]

pub fn and(self) -> i32[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> i32[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> i32[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl i32x4[src]

pub fn from_slice_aligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
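A load sketch for the slice constructors above (assuming packed_simd); the unaligned variant only requires enough elements, while the aligned one additionally requires align_of::<i32x4>() alignment:

use packed_simd::i32x4;

fn main() {
    let data = [1, 2, 3, 4, 5, 6];
    // Reads the first Self::lanes() == 4 elements; the rest are ignored.
    let v = i32x4::from_slice_unaligned(&data);
    assert_eq!(v, i32x4::new(1, 2, 3, 4));
}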

impl i32x4[src]

pub fn write_to_slice_aligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
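And the reverse direction, a store sketch (assuming packed_simd):

use packed_simd::i32x4;

fn main() {
    let v = i32x4::new(1, 2, 3, 4);
    let mut out = [0i32; 4];
    v.write_to_slice_unaligned(&mut out);  // panics if out is too short
    assert_eq!(out, [1, 2, 3, 4]);
}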

impl i32x4[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl i32x4[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl i32x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i32x4[src]

pub fn eq(self, other: Self) -> m32x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x4[src]

Lane-wise greater-than-or-equals comparison.

+
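These comparisons return a mask vector (m32x4 here) with one boolean lane per input lane, rather than a single bool. A sketch (assuming packed_simd):

use packed_simd::{i32x4, m32x4};

fn main() {
    let a = i32x4::new(1, 2, 3, 4);
    let b = i32x4::new(4, 3, 2, 1);
    assert_eq!(a.lt(b), m32x4::new(true, true, false, false));
}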

impl i32x4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i32x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl i32x4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i32x4>[src]

Returns a wrapper that implements Ord.

+

impl i32x4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not +correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i32> for i32x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i32; 4]>> for i32x4[src]

impl AddAssign<i32> for i32x4[src]

impl Binary for i32x4[src]

impl BitAnd<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i32> for i32x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i32; 4]>> for i32x4[src]

impl BitAndAssign<i32> for i32x4[src]

impl BitOr<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i32> for i32x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i32; 4]>> for i32x4[src]

impl BitOrAssign<i32> for i32x4[src]

impl BitXor<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i32> for i32x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i32; 4]>> for i32x4[src]

impl BitXorAssign<i32> for i32x4[src]

impl Debug for i32x4[src]

impl Default for i32x4[src]

impl Div<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i32> for i32x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i32; 4]>> for i32x4[src]

impl DivAssign<i32> for i32x4[src]

impl Eq for i32x4[src]

impl From<[i32; 4]> for i32x4[src]

impl From<Simd<[i16; 4]>> for i32x4[src]

impl From<Simd<[i8; 4]>> for i32x4[src]

impl From<Simd<[u16; 4]>> for i32x4[src]

impl From<Simd<[u8; 4]>> for i32x4[src]

impl FromBits<Simd<[f32; 4]>> for i32x4[src]

impl FromBits<Simd<[f64; 2]>> for i32x4[src]

impl FromBits<Simd<[i128; 1]>> for i32x4[src]

impl FromBits<Simd<[i16; 8]>> for i32x4[src]

impl FromBits<Simd<[i64; 2]>> for i32x4[src]

impl FromBits<Simd<[i8; 16]>> for i32x4[src]

impl FromBits<Simd<[m128; 1]>> for i32x4[src]

impl FromBits<Simd<[m16; 8]>> for i32x4[src]

impl FromBits<Simd<[m32; 4]>> for i32x4[src]

impl FromBits<Simd<[m64; 2]>> for i32x4[src]

impl FromBits<Simd<[m8; 16]>> for i32x4[src]

impl FromBits<Simd<[u128; 1]>> for i32x4[src]

impl FromBits<Simd<[u16; 8]>> for i32x4[src]

impl FromBits<Simd<[u32; 4]>> for i32x4[src]

impl FromBits<Simd<[u64; 2]>> for i32x4[src]

impl FromBits<Simd<[u8; 16]>> for i32x4[src]

impl FromBits<__m128> for i32x4[src]

impl FromBits<__m128d> for i32x4[src]

impl FromBits<__m128i> for i32x4[src]

impl FromCast<Simd<[f32; 4]>> for i32x4[src]

impl FromCast<Simd<[f64; 4]>> for i32x4[src]

impl FromCast<Simd<[i128; 4]>> for i32x4[src]

impl FromCast<Simd<[i16; 4]>> for i32x4[src]

impl FromCast<Simd<[i64; 4]>> for i32x4[src]

impl FromCast<Simd<[i8; 4]>> for i32x4[src]

impl FromCast<Simd<[isize; 4]>> for i32x4[src]

impl FromCast<Simd<[m128; 4]>> for i32x4[src]

impl FromCast<Simd<[m16; 4]>> for i32x4[src]

impl FromCast<Simd<[m32; 4]>> for i32x4[src]

impl FromCast<Simd<[m64; 4]>> for i32x4[src]

impl FromCast<Simd<[m8; 4]>> for i32x4[src]

impl FromCast<Simd<[msize; 4]>> for i32x4[src]

impl FromCast<Simd<[u128; 4]>> for i32x4[src]

impl FromCast<Simd<[u16; 4]>> for i32x4[src]

impl FromCast<Simd<[u32; 4]>> for i32x4[src]

impl FromCast<Simd<[u64; 4]>> for i32x4[src]

impl FromCast<Simd<[u8; 4]>> for i32x4[src]

impl FromCast<Simd<[usize; 4]>> for i32x4[src]

impl Hash for i32x4[src]

impl LowerHex for i32x4[src]

impl Mul<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i32> for i32x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i32; 4]>> for i32x4[src]

impl MulAssign<i32> for i32x4[src]

impl Neg for i32x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i32x4[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i32x4[src]

impl PartialEq<Simd<[i32; 4]>> for i32x4[src]

impl<'a> Product<&'a Simd<[i32; 4]>> for i32x4[src]

impl Product<Simd<[i32; 4]>> for i32x4[src]

impl Rem<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i32> for i32x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i32; 4]>> for i32x4[src]

impl RemAssign<i32> for i32x4[src]

impl Shl<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i32x4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i32; 4]>> for i32x4[src]

impl ShlAssign<u32> for i32x4[src]

impl Shr<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i32x4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i32; 4]>> for i32x4[src]

impl ShrAssign<u32> for i32x4[src]

impl Simd for i32x4[src]

type Element = i32

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+

impl Sub<Simd<[i32; 4]>> for i32x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i32> for i32x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i32; 4]>> for i32x4[src]

impl SubAssign<i32> for i32x4[src]

impl<'a> Sum<&'a Simd<[i32; 4]>> for i32x4[src]

impl Sum<Simd<[i32; 4]>> for i32x4[src]

impl UpperHex for i32x4[src]

\ No newline at end of file diff --git a/packed_simd/type.i32x8.html b/packed_simd/type.i32x8.html new file mode 100644 index 000000000..8e9945c78 --- /dev/null +++ b/packed_simd/type.i32x8.html @@ -0,0 +1,251 @@ +packed_simd::i32x8 - Rust

[][src]Type Definition packed_simd::i32x8

type i32x8 = Simd<[i32; 8]>;

A 256-bit vector with 8 i32 lanes.

+

Implementations

impl i32x8[src]

pub const fn new(
    x0: i32,
    x1: i32,
    x2: i32,
    x3: i32,
    x4: i32,
    x5: i32,
    x6: i32,
    x7: i32
) -> Self
[src]

Creates a new instance with each vector element initialized +with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i32) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> i32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = + "replace does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`"]pub fn replace(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = + "replace_unchecked does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl i32x8[src]

pub fn rotate_left(self, n: i32x8) -> i32x8[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: i32x8) -> i32x8[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent +to slice::rotate_right.

+

impl i32x8[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl i32x8[src]

pub fn wrapping_sum(self) -> i32[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, it returns the mathematical result +modulo 2^32, the bit width of each i32 lane.

+

pub fn wrapping_product(self) -> i32[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, it returns the mathematical result +modulo 2^32, the bit width of each i32 lane.

+

impl i32x8[src]

pub fn max_element(self) -> i32[src]

Largest vector element value.

+

pub fn min_element(self) -> i32[src]

Smallest vector element value.

+
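A sketch of these two reductions (assuming packed_simd):

use packed_simd::i32x8;

fn main() {
    let v = i32x8::new(3, 7, -1, 0, 5, 2, 9, 4);
    assert_eq!(v.max_element(), 9);
    assert_eq!(v.min_element(), -1);
}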

impl i32x8[src]

pub fn and(self) -> i32[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> i32[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> i32[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl i32x8[src]

pub fn from_slice_aligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i32x8[src]

pub fn write_to_slice_aligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i32x8[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+
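A sketch of the endianness helpers (assuming packed_simd); on a little-endian target to_le is the identity and to_be byte-swaps each lane:

use packed_simd::i32x8;

fn main() {
    let v = i32x8::splat(0x0A0B_0C0D);
    if cfg!(target_endian = "little") {
        assert_eq!(v.to_le(), v);                          // no-op
        assert_eq!(v.to_be(), i32x8::splat(0x0D0C_0B0A));  // bytes reversed per lane
    }
    assert_eq!(i32x8::from_le(v.to_le()), v);              // round-trips on any target
}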

impl i32x8[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl i32x8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i32x8[src]

pub fn eq(self, other: Self) -> m32x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl i32x8[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i32x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl i32x8[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i32x8>[src]

Returns a wrapper that implements Ord.

+

impl i32x8[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not +correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i32> for i32x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i32; 8]>> for i32x8[src]

impl AddAssign<i32> for i32x8[src]

impl Binary for i32x8[src]

impl BitAnd<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i32> for i32x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i32; 8]>> for i32x8[src]

impl BitAndAssign<i32> for i32x8[src]

impl BitOr<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i32> for i32x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i32; 8]>> for i32x8[src]

impl BitOrAssign<i32> for i32x8[src]

impl BitXor<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i32> for i32x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i32; 8]>> for i32x8[src]

impl BitXorAssign<i32> for i32x8[src]

impl Debug for i32x8[src]

impl Default for i32x8[src]

impl Div<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i32> for i32x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i32; 8]>> for i32x8[src]

impl DivAssign<i32> for i32x8[src]

impl Eq for i32x8[src]

impl From<[i32; 8]> for i32x8[src]

impl From<Simd<[i16; 8]>> for i32x8[src]

impl From<Simd<[i8; 8]>> for i32x8[src]

impl From<Simd<[u16; 8]>> for i32x8[src]

impl From<Simd<[u8; 8]>> for i32x8[src]

impl FromBits<Simd<[f32; 8]>> for i32x8[src]

impl FromBits<Simd<[f64; 4]>> for i32x8[src]

impl FromBits<Simd<[i128; 2]>> for i32x8[src]

impl FromBits<Simd<[i16; 16]>> for i32x8[src]

impl FromBits<Simd<[i64; 4]>> for i32x8[src]

impl FromBits<Simd<[i8; 32]>> for i32x8[src]

impl FromBits<Simd<[m128; 2]>> for i32x8[src]

impl FromBits<Simd<[m16; 16]>> for i32x8[src]

impl FromBits<Simd<[m32; 8]>> for i32x8[src]

impl FromBits<Simd<[m64; 4]>> for i32x8[src]

impl FromBits<Simd<[m8; 32]>> for i32x8[src]

impl FromBits<Simd<[u128; 2]>> for i32x8[src]

impl FromBits<Simd<[u16; 16]>> for i32x8[src]

impl FromBits<Simd<[u32; 8]>> for i32x8[src]

impl FromBits<Simd<[u64; 4]>> for i32x8[src]

impl FromBits<Simd<[u8; 32]>> for i32x8[src]

impl FromBits<__m256> for i32x8[src]

impl FromBits<__m256d> for i32x8[src]

impl FromBits<__m256i> for i32x8[src]

impl FromCast<Simd<[f32; 8]>> for i32x8[src]

impl FromCast<Simd<[f64; 8]>> for i32x8[src]

impl FromCast<Simd<[i16; 8]>> for i32x8[src]

impl FromCast<Simd<[i64; 8]>> for i32x8[src]

impl FromCast<Simd<[i8; 8]>> for i32x8[src]

impl FromCast<Simd<[isize; 8]>> for i32x8[src]

impl FromCast<Simd<[m16; 8]>> for i32x8[src]

impl FromCast<Simd<[m32; 8]>> for i32x8[src]

impl FromCast<Simd<[m64; 8]>> for i32x8[src]

impl FromCast<Simd<[m8; 8]>> for i32x8[src]

impl FromCast<Simd<[msize; 8]>> for i32x8[src]

impl FromCast<Simd<[u16; 8]>> for i32x8[src]

impl FromCast<Simd<[u32; 8]>> for i32x8[src]

impl FromCast<Simd<[u64; 8]>> for i32x8[src]

impl FromCast<Simd<[u8; 8]>> for i32x8[src]

impl FromCast<Simd<[usize; 8]>> for i32x8[src]

impl Hash for i32x8[src]

impl LowerHex for i32x8[src]

impl Mul<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i32> for i32x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i32; 8]>> for i32x8[src]

impl MulAssign<i32> for i32x8[src]

impl Neg for i32x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i32x8[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i32x8[src]

impl PartialEq<Simd<[i32; 8]>> for i32x8[src]

impl<'a> Product<&'a Simd<[i32; 8]>> for i32x8[src]

impl Product<Simd<[i32; 8]>> for i32x8[src]

impl Rem<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i32> for i32x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i32; 8]>> for i32x8[src]

impl RemAssign<i32> for i32x8[src]

impl Shl<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i32x8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i32; 8]>> for i32x8[src]

impl ShlAssign<u32> for i32x8[src]

impl Shr<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i32x8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i32; 8]>> for i32x8[src]

impl ShrAssign<u32> for i32x8[src]

impl Simd for i32x8[src]

type Element = i32

Element type of the SIMD vector

+

type LanesType = [u32; 8]

The type: [u32; Self::N].

+

impl Sub<Simd<[i32; 8]>> for i32x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i32> for i32x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i32; 8]>> for i32x8[src]

impl SubAssign<i32> for i32x8[src]

impl<'a> Sum<&'a Simd<[i32; 8]>> for i32x8[src]

impl Sum<Simd<[i32; 8]>> for i32x8[src]

impl UpperHex for i32x8[src]

\ No newline at end of file diff --git a/packed_simd/type.i64x2.html b/packed_simd/type.i64x2.html new file mode 100644 index 000000000..d2ebecc09 --- /dev/null +++ b/packed_simd/type.i64x2.html @@ -0,0 +1,256 @@ +packed_simd::i64x2 - Rust

[][src]Type Definition packed_simd::i64x2

type i64x2 = Simd<[i64; 2]>;

A 128-bit vector with 2 i64 lanes.

+

Implementations

impl i64x2[src]

pub const fn new(x0: i64, x1: i64) -> Self[src]

Creates a new instance with each vector element initialized +with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i64) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> i64[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i64[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = + "replace does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`"]pub fn replace(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = + "replace_unchecked does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl i64x2[src]

pub fn rotate_left(self, n: i64x2) -> i64x2[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: i64x2) -> i64x2[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent +to slice::rotate_right.

+

impl i64x2[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl i64x2[src]

pub fn wrapping_sum(self) -> i64[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, it returns the mathematical result +modulo 2^64, the bit width of each i64 lane.

+

pub fn wrapping_product(self) -> i64[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, it returns the mathematical result +modulo 2^64, the bit width of each i64 lane.

+

impl i64x2[src]

pub fn max_element(self) -> i64[src]

Largest vector element value.

+

pub fn min_element(self) -> i64[src]

Smallest vector element value.

+

impl i64x2[src]

pub fn and(self) -> i64[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> i64[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> i64[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl i64x2[src]

pub fn from_slice_aligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i64x2[src]

pub fn write_to_slice_aligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i64x2[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl i64x2[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+
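These bit counts apply per lane and return a vector of counts. A sketch (assuming packed_simd):

use packed_simd::i64x2;

fn main() {
    let v = i64x2::new(0b1011, 0);
    assert_eq!(v.count_ones(), i64x2::new(3, 0));
    assert_eq!(v.leading_zeros(), i64x2::new(60, 64));   // 64-bit lanes
    assert_eq!(v.trailing_zeros(), i64x2::new(0, 64));
}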

impl i64x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i64x2[src]

pub fn eq(self, other: Self) -> m64x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m64x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m64x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m64x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m64x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m64x2[src]

Lane-wise greater-than-or-equals comparison.

+

impl i64x2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i64x2>[src]

Returns a wrapper that implements PartialOrd.

+

impl i64x2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i64x2>[src]

Returns a wrapper that implements Ord.

+
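The vectors themselves only implement PartialEq; the wrappers above add an ordering that compares lane 0 first, then lane 1, and so on. A sketch (assuming packed_simd):

use packed_simd::i64x2;

fn main() {
    let a = i64x2::new(1, 9);
    let b = i64x2::new(2, 0);
    // Lane 0 decides: 1 < 2, so a orders before b despite lane 1.
    assert!(a.lex_ord() < b.lex_ord());
}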

impl i64x2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not +correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i64> for i64x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i64; 2]>> for i64x2[src]

impl AddAssign<i64> for i64x2[src]

impl Binary for i64x2[src]

impl BitAnd<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i64> for i64x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i64; 2]>> for i64x2[src]

impl BitAndAssign<i64> for i64x2[src]

impl BitOr<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i64> for i64x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i64; 2]>> for i64x2[src]

impl BitOrAssign<i64> for i64x2[src]

impl BitXor<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i64> for i64x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i64; 2]>> for i64x2[src]

impl BitXorAssign<i64> for i64x2[src]

impl Debug for i64x2[src]

impl Default for i64x2[src]

impl Div<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i64> for i64x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i64; 2]>> for i64x2[src]

impl DivAssign<i64> for i64x2[src]

impl Eq for i64x2[src]

impl From<[i64; 2]> for i64x2[src]

impl From<Simd<[i16; 2]>> for i64x2[src]

impl From<Simd<[i32; 2]>> for i64x2[src]

impl From<Simd<[i8; 2]>> for i64x2[src]

impl From<Simd<[u16; 2]>> for i64x2[src]

impl From<Simd<[u32; 2]>> for i64x2[src]

impl From<Simd<[u8; 2]>> for i64x2[src]

impl FromBits<Simd<[f32; 4]>> for i64x2[src]

impl FromBits<Simd<[f64; 2]>> for i64x2[src]

impl FromBits<Simd<[i128; 1]>> for i64x2[src]

impl FromBits<Simd<[i16; 8]>> for i64x2[src]

impl FromBits<Simd<[i32; 4]>> for i64x2[src]

impl FromBits<Simd<[i8; 16]>> for i64x2[src]

impl FromBits<Simd<[m128; 1]>> for i64x2[src]

impl FromBits<Simd<[m16; 8]>> for i64x2[src]

impl FromBits<Simd<[m32; 4]>> for i64x2[src]

impl FromBits<Simd<[m64; 2]>> for i64x2[src]

impl FromBits<Simd<[m8; 16]>> for i64x2[src]

impl FromBits<Simd<[u128; 1]>> for i64x2[src]

impl FromBits<Simd<[u16; 8]>> for i64x2[src]

impl FromBits<Simd<[u32; 4]>> for i64x2[src]

impl FromBits<Simd<[u64; 2]>> for i64x2[src]

impl FromBits<Simd<[u8; 16]>> for i64x2[src]

impl FromBits<__m128> for i64x2[src]

impl FromBits<__m128d> for i64x2[src]

impl FromBits<__m128i> for i64x2[src]

impl FromCast<Simd<[f32; 2]>> for i64x2[src]

impl FromCast<Simd<[f64; 2]>> for i64x2[src]

impl FromCast<Simd<[i128; 2]>> for i64x2[src]

impl FromCast<Simd<[i16; 2]>> for i64x2[src]

impl FromCast<Simd<[i32; 2]>> for i64x2[src]

impl FromCast<Simd<[i8; 2]>> for i64x2[src]

impl FromCast<Simd<[isize; 2]>> for i64x2[src]

impl FromCast<Simd<[m128; 2]>> for i64x2[src]

impl FromCast<Simd<[m16; 2]>> for i64x2[src]

impl FromCast<Simd<[m32; 2]>> for i64x2[src]

impl FromCast<Simd<[m64; 2]>> for i64x2[src]

impl FromCast<Simd<[m8; 2]>> for i64x2[src]

impl FromCast<Simd<[msize; 2]>> for i64x2[src]

impl FromCast<Simd<[u128; 2]>> for i64x2[src]

impl FromCast<Simd<[u16; 2]>> for i64x2[src]

impl FromCast<Simd<[u32; 2]>> for i64x2[src]

impl FromCast<Simd<[u64; 2]>> for i64x2[src]

impl FromCast<Simd<[u8; 2]>> for i64x2[src]

impl FromCast<Simd<[usize; 2]>> for i64x2[src]

impl Hash for i64x2[src]

impl LowerHex for i64x2[src]

impl Mul<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i64> for i64x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i64; 2]>> for i64x2[src]

impl MulAssign<i64> for i64x2[src]

impl Neg for i64x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i64x2[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i64x2[src]

impl PartialEq<Simd<[i64; 2]>> for i64x2[src]

impl<'a> Product<&'a Simd<[i64; 2]>> for i64x2[src]

impl Product<Simd<[i64; 2]>> for i64x2[src]

impl Rem<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i64> for i64x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i64; 2]>> for i64x2[src]

impl RemAssign<i64> for i64x2[src]

impl Shl<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i64x2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i64; 2]>> for i64x2[src]

impl ShlAssign<u32> for i64x2[src]

impl Shr<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i64x2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i64; 2]>> for i64x2[src]

impl ShrAssign<u32> for i64x2[src]

impl Simd for i64x2[src]

type Element = i64

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+

impl Sub<Simd<[i64; 2]>> for i64x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i64> for i64x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i64; 2]>> for i64x2[src]

impl SubAssign<i64> for i64x2[src]

impl<'a> Sum<&'a Simd<[i64; 2]>> for i64x2[src]

impl Sum<Simd<[i64; 2]>> for i64x2[src]

impl UpperHex for i64x2[src]

\ No newline at end of file
diff --git a/packed_simd/type.i64x4.html b/packed_simd/type.i64x4.html
new file mode 100644
index 000000000..33e22a4af
--- /dev/null
+++ b/packed_simd/type.i64x4.html
@@ -0,0 +1,256 @@
+packed_simd::i64x4 - Rust

[][src]Type Definition packed_simd::i64x4

type i64x4 = Simd<[i64; 4]>;

A 256-bit vector with 4 i64 lanes.

+

Implementations

impl i64x4[src]

pub const fn new(x0: i64, x1: i64, x2: i64, x3: i64) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i64) -> Self[src]

Constructs a new instance with each element initialized to +value.

+
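As a quick illustration of the constructor API above, here is a minimal sketch; it assumes the packed_simd crate is available (which requires a nightly toolchain), and the same pattern applies to the other lane widths documented in this section:

use packed_simd::i64x4;

let v = i64x4::new(1, 2, 3, 4);
assert_eq!(i64x4::lanes(), 4);
let s = i64x4::splat(9);
assert_eq!(s.extract(2), 9); // extract is documented below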

pub fn extract(self, index: usize) -> i64[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i64[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = + "replace does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = + "replace_unchecked does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+
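A brief sketch of extract and replace; note that replace returns a new vector and leaves the original untouched, which is why the must_use attributes above exist:

let v = i64x4::new(10, 20, 30, 40);
assert_eq!(v.extract(1), 20);
let w = v.replace(1, 99); // returns a new vector
assert_eq!(w.extract(1), 99);
assert_eq!(v.extract(1), 20); // v is unchanged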

impl i64x4[src]

pub fn rotate_left(self, n: i64x4) -> i64x4[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: i64x4) -> i64x4[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+
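For illustration, a small sketch of the per-lane rotations; each lane of self is rotated by the amount in the corresponding lane of n:

let x = i64x4::splat(0b0001);
let n = i64x4::new(0, 1, 2, 3);
// no bits wrap around here because the rotation amounts are small
assert_eq!(x.rotate_left(n), i64x4::new(0b0001, 0b0010, 0b0100, 0b1000));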

impl i64x4[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+
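A minimal sketch of the lane-wise min and max:

let a = i64x4::new(1, 5, 3, 7);
let b = i64x4::new(2, 4, 6, 0);
assert_eq!(a.min(b), i64x4::new(1, 4, 3, 0));
assert_eq!(a.max(b), i64x4::new(2, 5, 6, 7));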

impl i64x4[src]

pub fn wrapping_sum(self) -> i64[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows it returns the mathematical result +modulo 2^n where n is the number of times it overflows.

+

pub fn wrapping_product(self) -> i64[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows it returns the mathematical result +modulo 2^n where n is the number of times it overflows.

+
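A short sketch of these horizontal reductions; the association order is the tree reduction described above, which does not change the wrapped result for sums and products:

let v = i64x4::new(1, 2, 3, 4);
assert_eq!(v.wrapping_sum(), 10);     // 1 + 2 + 3 + 4
assert_eq!(v.wrapping_product(), 24); // 1 * 2 * 3 * 4, wrapping on overflow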

impl i64x4[src]

pub fn max_element(self) -> i64[src]

Largest vector element value.

+

pub fn min_element(self) -> i64[src]

Smallest vector element value.

+

impl i64x4[src]

pub fn and(self) -> i64[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> i64[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> i64[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+
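A sketch of the element-wise reductions above, using small bit patterns so the results are easy to check by hand:

let v = i64x4::new(0b1100, 0b1010, 0b1001, 0b1111);
assert_eq!(v.max_element(), 0b1111);
assert_eq!(v.min_element(), 0b1001);
assert_eq!(v.and(), 0b1000); // bit 3 is the only bit set in every lane
assert_eq!(v.or(), 0b1111);
assert_eq!(v.xor(), 0b0000); // each bit is set in an even number of lanes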

impl i64x4[src]

pub fn from_slice_aligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
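A minimal sketch of constructing a vector from a slice; the unaligned variant is the safe default, while from_slice_aligned additionally requires &slice[0] to sit on an align_of::<i64x4>() boundary:

let data = [1i64, 2, 3, 4];
let v = i64x4::from_slice_unaligned(&data);
assert_eq!(v, i64x4::new(1, 2, 3, 4));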

impl i64x4[src]

pub fn write_to_slice_aligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
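The mirror image of the slice constructors above, sketched with the unaligned variant:

let v = i64x4::new(1, 2, 3, 4);
let mut out = [0i64; 4];
v.write_to_slice_unaligned(&mut out);
assert_eq!(out, [1, 2, 3, 4]);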

impl i64x4[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+
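A sketch of the endianness helpers; the round trips below hold on any target, since from_le and from_be invert to_le and to_be:

let v = i64x4::splat(0x0102_0304_0506_0708);
let le = v.to_le(); // identical to v on little-endian targets
let be = v.to_be(); // byte-swapped on little-endian targets
assert_eq!(i64x4::from_le(le), v);
assert_eq!(i64x4::from_be(be), v);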

impl i64x4[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+
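A small sketch of the per-lane bit counts; note that the results are themselves vectors, with one count per lane:

let v = i64x4::new(0, 1, 0b1011, -1);
assert_eq!(v.count_ones(), i64x4::new(0, 1, 3, 64));
assert_eq!(v.leading_zeros(), i64x4::new(64, 63, 60, 0));
assert_eq!(v.trailing_zeros(), i64x4::new(64, 0, 0, 0));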

impl i64x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i64x4[src]

pub fn eq(self, other: Self) -> m64x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m64x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m64x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m64x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m64x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m64x4[src]

Lane-wise greater-than-or-equals comparison.

+
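A sketch of the lane-wise comparisons; each returns an m64x4 mask whose lanes can be inspected with extract:

let a = i64x4::new(1, 2, 3, 4);
let b = i64x4::new(4, 3, 2, 1);
let m = a.lt(b);
assert!(m.extract(0));  // 1 < 4
assert!(!m.extract(2)); // 3 < 2 is false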

impl i64x4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i64x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl i64x4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i64x4>[src]

Returns a wrapper that implements Ord.

+
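A brief sketch of the ordering wrappers; the comparison is lexicographic, so the first differing lane decides:

let a = i64x4::new(1, 2, 3, 4);
let b = i64x4::new(1, 2, 3, 5);
assert!(a.partial_lex_ord() < b.partial_lex_ord());
assert!(a.lex_ord() < b.lex_ord());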

impl i64x4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+
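A sketch of bitmask; it assumes the usual convention that lane 0 maps to the least-significant bit of the result:

let v = i64x4::new(-1, 1, -2, 2);
// the MSB of a lane is set exactly when that lane is negative
assert_eq!(v.bitmask(), 0b0101); // lanes 0 and 2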

Trait Implementations

impl Add<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i64> for i64x4[src]

type Output = Self

The resulting type after applying the + operator.

+
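The operator overloads come in two shapes: vector-vector, and vector-scalar, where the scalar is applied to every lane (equivalent to splatting it first). A minimal sketch:

let v = i64x4::new(1, 2, 3, 4);
assert_eq!(v + v, i64x4::new(2, 4, 6, 8));      // Add<Simd<[i64; 4]>>
assert_eq!(v + 10, i64x4::new(11, 12, 13, 14)); // Add<i64>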

impl AddAssign<Simd<[i64; 4]>> for i64x4[src]

impl AddAssign<i64> for i64x4[src]

impl Binary for i64x4[src]

impl BitAnd<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i64> for i64x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i64; 4]>> for i64x4[src]

impl BitAndAssign<i64> for i64x4[src]

impl BitOr<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i64> for i64x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i64; 4]>> for i64x4[src]

impl BitOrAssign<i64> for i64x4[src]

impl BitXor<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i64> for i64x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i64; 4]>> for i64x4[src]

impl BitXorAssign<i64> for i64x4[src]

impl Debug for i64x4[src]

impl Default for i64x4[src]

impl Div<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i64> for i64x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i64; 4]>> for i64x4[src]

impl DivAssign<i64> for i64x4[src]

impl Eq for i64x4[src]

impl From<[i64; 4]> for i64x4[src]

impl From<Simd<[i16; 4]>> for i64x4[src]

impl From<Simd<[i32; 4]>> for i64x4[src]

impl From<Simd<[i8; 4]>> for i64x4[src]

impl From<Simd<[u16; 4]>> for i64x4[src]

impl From<Simd<[u32; 4]>> for i64x4[src]

impl From<Simd<[u8; 4]>> for i64x4[src]

impl FromBits<Simd<[f32; 8]>> for i64x4[src]

impl FromBits<Simd<[f64; 4]>> for i64x4[src]

impl FromBits<Simd<[i128; 2]>> for i64x4[src]

impl FromBits<Simd<[i16; 16]>> for i64x4[src]

impl FromBits<Simd<[i32; 8]>> for i64x4[src]

impl FromBits<Simd<[i8; 32]>> for i64x4[src]

impl FromBits<Simd<[m128; 2]>> for i64x4[src]

impl FromBits<Simd<[m16; 16]>> for i64x4[src]

impl FromBits<Simd<[m32; 8]>> for i64x4[src]

impl FromBits<Simd<[m64; 4]>> for i64x4[src]

impl FromBits<Simd<[m8; 32]>> for i64x4[src]

impl FromBits<Simd<[u128; 2]>> for i64x4[src]

impl FromBits<Simd<[u16; 16]>> for i64x4[src]

impl FromBits<Simd<[u32; 8]>> for i64x4[src]

impl FromBits<Simd<[u64; 4]>> for i64x4[src]

impl FromBits<Simd<[u8; 32]>> for i64x4[src]

impl FromBits<__m256> for i64x4[src]

impl FromBits<__m256d> for i64x4[src]

impl FromBits<__m256i> for i64x4[src]

impl FromCast<Simd<[f32; 4]>> for i64x4[src]

impl FromCast<Simd<[f64; 4]>> for i64x4[src]

impl FromCast<Simd<[i128; 4]>> for i64x4[src]

impl FromCast<Simd<[i16; 4]>> for i64x4[src]

impl FromCast<Simd<[i32; 4]>> for i64x4[src]

impl FromCast<Simd<[i8; 4]>> for i64x4[src]

impl FromCast<Simd<[isize; 4]>> for i64x4[src]

impl FromCast<Simd<[m128; 4]>> for i64x4[src]

impl FromCast<Simd<[m16; 4]>> for i64x4[src]

impl FromCast<Simd<[m32; 4]>> for i64x4[src]

impl FromCast<Simd<[m64; 4]>> for i64x4[src]

impl FromCast<Simd<[m8; 4]>> for i64x4[src]

impl FromCast<Simd<[msize; 4]>> for i64x4[src]

impl FromCast<Simd<[u128; 4]>> for i64x4[src]

impl FromCast<Simd<[u16; 4]>> for i64x4[src]

impl FromCast<Simd<[u32; 4]>> for i64x4[src]

impl FromCast<Simd<[u64; 4]>> for i64x4[src]

impl FromCast<Simd<[u8; 4]>> for i64x4[src]

impl FromCast<Simd<[usize; 4]>> for i64x4[src]

impl Hash for i64x4[src]

impl LowerHex for i64x4[src]

impl Mul<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i64> for i64x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i64; 4]>> for i64x4[src]

impl MulAssign<i64> for i64x4[src]

impl Neg for i64x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i64x4[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i64x4[src]

impl PartialEq<Simd<[i64; 4]>> for i64x4[src]

impl<'a> Product<&'a Simd<[i64; 4]>> for i64x4[src]

impl Product<Simd<[i64; 4]>> for i64x4[src]

impl Rem<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i64> for i64x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i64; 4]>> for i64x4[src]

impl RemAssign<i64> for i64x4[src]

impl Shl<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i64x4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i64; 4]>> for i64x4[src]

impl ShlAssign<u32> for i64x4[src]

impl Shr<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i64x4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i64; 4]>> for i64x4[src]

impl ShrAssign<u32> for i64x4[src]

impl Simd for i64x4[src]

type Element = i64

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+

impl Sub<Simd<[i64; 4]>> for i64x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i64> for i64x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i64; 4]>> for i64x4[src]

impl SubAssign<i64> for i64x4[src]

impl<'a> Sum<&'a Simd<[i64; 4]>> for i64x4[src]

impl Sum<Simd<[i64; 4]>> for i64x4[src]

impl UpperHex for i64x4[src]

\ No newline at end of file
diff --git a/packed_simd/type.i64x8.html b/packed_simd/type.i64x8.html
new file mode 100644
index 000000000..beb2c4543
--- /dev/null
+++ b/packed_simd/type.i64x8.html
@@ -0,0 +1,250 @@
+packed_simd::i64x8 - Rust

[][src]Type Definition packed_simd::i64x8

type i64x8 = Simd<[i64; 8]>;

A 512-bit vector with 8 i64 lanes.

+

Implementations

impl i64x8[src]

pub const fn new(
    x0: i64,
    x1: i64,
    x2: i64,
    x3: i64,
    x4: i64,
    x5: i64,
    x6: i64,
    x7: i64
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i64) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> i64[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i64[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = + "replace does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = + "replace_unchecked does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl i64x8[src]

pub fn rotate_left(self, n: i64x8) -> i64x8[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: i64x8) -> i64x8[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl i64x8[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl i64x8[src]

pub fn wrapping_sum(self) -> i64[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows it returns the mathematical result +modulo 2^n where n is the number of times it overflows.

+

pub fn wrapping_product(self) -> i64[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows it returns the mathematical result +modulo 2^n where n is the number of times it overflows.

+

impl i64x8[src]

pub fn max_element(self) -> i64[src]

Largest vector element value.

+

pub fn min_element(self) -> i64[src]

Smallest vector element value.

+

impl i64x8[src]

pub fn and(self) -> i64[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> i64[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> i64[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl i64x8[src]

pub fn from_slice_aligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i64x8[src]

pub fn write_to_slice_aligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i64x8[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl i64x8[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl i64x8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i64x8[src]

pub fn eq(self, other: Self) -> m64x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m64x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m64x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m64x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m64x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m64x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl i64x8[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i64x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl i64x8[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i64x8>[src]

Returns a wrapper that implements Ord.

+

impl i64x8[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i64> for i64x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i64; 8]>> for i64x8[src]

impl AddAssign<i64> for i64x8[src]

impl Binary for i64x8[src]

impl BitAnd<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i64> for i64x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i64; 8]>> for i64x8[src]

impl BitAndAssign<i64> for i64x8[src]

impl BitOr<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i64> for i64x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i64; 8]>> for i64x8[src]

impl BitOrAssign<i64> for i64x8[src]

impl BitXor<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i64> for i64x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i64; 8]>> for i64x8[src]

impl BitXorAssign<i64> for i64x8[src]

impl Debug for i64x8[src]

impl Default for i64x8[src]

impl Div<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i64> for i64x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i64; 8]>> for i64x8[src]

impl DivAssign<i64> for i64x8[src]

impl Eq for i64x8[src]

impl From<[i64; 8]> for i64x8[src]

impl From<Simd<[i16; 8]>> for i64x8[src]

impl From<Simd<[i32; 8]>> for i64x8[src]

impl From<Simd<[i8; 8]>> for i64x8[src]

impl From<Simd<[u16; 8]>> for i64x8[src]

impl From<Simd<[u32; 8]>> for i64x8[src]

impl From<Simd<[u8; 8]>> for i64x8[src]

impl FromBits<Simd<[f32; 16]>> for i64x8[src]

impl FromBits<Simd<[f64; 8]>> for i64x8[src]

impl FromBits<Simd<[i128; 4]>> for i64x8[src]

impl FromBits<Simd<[i16; 32]>> for i64x8[src]

impl FromBits<Simd<[i32; 16]>> for i64x8[src]

impl FromBits<Simd<[i8; 64]>> for i64x8[src]

impl FromBits<Simd<[m128; 4]>> for i64x8[src]

impl FromBits<Simd<[m16; 32]>> for i64x8[src]

impl FromBits<Simd<[m32; 16]>> for i64x8[src]

impl FromBits<Simd<[m64; 8]>> for i64x8[src]

impl FromBits<Simd<[m8; 64]>> for i64x8[src]

impl FromBits<Simd<[u128; 4]>> for i64x8[src]

impl FromBits<Simd<[u16; 32]>> for i64x8[src]

impl FromBits<Simd<[u32; 16]>> for i64x8[src]

impl FromBits<Simd<[u64; 8]>> for i64x8[src]

impl FromBits<Simd<[u8; 64]>> for i64x8[src]

impl FromCast<Simd<[f32; 8]>> for i64x8[src]

impl FromCast<Simd<[f64; 8]>> for i64x8[src]

impl FromCast<Simd<[i16; 8]>> for i64x8[src]

impl FromCast<Simd<[i32; 8]>> for i64x8[src]

impl FromCast<Simd<[i8; 8]>> for i64x8[src]

impl FromCast<Simd<[isize; 8]>> for i64x8[src]

impl FromCast<Simd<[m16; 8]>> for i64x8[src]

impl FromCast<Simd<[m32; 8]>> for i64x8[src]

impl FromCast<Simd<[m64; 8]>> for i64x8[src]

impl FromCast<Simd<[m8; 8]>> for i64x8[src]

impl FromCast<Simd<[msize; 8]>> for i64x8[src]

impl FromCast<Simd<[u16; 8]>> for i64x8[src]

impl FromCast<Simd<[u32; 8]>> for i64x8[src]

impl FromCast<Simd<[u64; 8]>> for i64x8[src]

impl FromCast<Simd<[u8; 8]>> for i64x8[src]

impl FromCast<Simd<[usize; 8]>> for i64x8[src]

impl Hash for i64x8[src]

impl LowerHex for i64x8[src]

impl Mul<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i64> for i64x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i64; 8]>> for i64x8[src]

impl MulAssign<i64> for i64x8[src]

impl Neg for i64x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i64x8[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i64x8[src]

impl PartialEq<Simd<[i64; 8]>> for i64x8[src]

impl<'a> Product<&'a Simd<[i64; 8]>> for i64x8[src]

impl Product<Simd<[i64; 8]>> for i64x8[src]

impl Rem<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i64> for i64x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i64; 8]>> for i64x8[src]

impl RemAssign<i64> for i64x8[src]

impl Shl<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i64x8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i64; 8]>> for i64x8[src]

impl ShlAssign<u32> for i64x8[src]

impl Shr<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i64x8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i64; 8]>> for i64x8[src]

impl ShrAssign<u32> for i64x8[src]

impl Simd for i64x8[src]

type Element = i64

Element type of the SIMD vector

+

type LanesType = [u32; 8]

The type: [u32; Self::N].

+

impl Sub<Simd<[i64; 8]>> for i64x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i64> for i64x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i64; 8]>> for i64x8[src]

impl SubAssign<i64> for i64x8[src]

impl<'a> Sum<&'a Simd<[i64; 8]>> for i64x8[src]

impl Sum<Simd<[i64; 8]>> for i64x8[src]

impl UpperHex for i64x8[src]

\ No newline at end of file
diff --git a/packed_simd/type.i8x16.html b/packed_simd/type.i8x16.html
new file mode 100644
index 000000000..683ac9d01
--- /dev/null
+++ b/packed_simd/type.i8x16.html
@@ -0,0 +1,240 @@
+packed_simd::i8x16 - Rust

[][src]Type Definition packed_simd::i8x16

type i8x16 = Simd<[i8; 16]>;

A 128-bit vector with 16 i8 lanes.

+

Implementations

impl i8x16[src]

pub const fn new(
    x0: i8,
    x1: i8,
    x2: i8,
    x3: i8,
    x4: i8,
    x5: i8,
    x6: i8,
    x7: i8,
    x8: i8,
    x9: i8,
    x10: i8,
    x11: i8,
    x12: i8,
    x13: i8,
    x14: i8,
    x15: i8
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = + "replace does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = + "replace_unchecked does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl i8x16[src]

pub fn rotate_left(self, n: i8x16) -> i8x16[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: i8x16) -> i8x16[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl i8x16[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl i8x16[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows it returns the mathematical result +modulo 2^n where n is the number of times it overflows.

+

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows it returns the mathematical result +modulo 2^n where n is the number of times it overflows.

+

impl i8x16[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

+

pub fn min_element(self) -> i8[src]

Smallest vector element value.

+

impl i8x16[src]

pub fn and(self) -> i8[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> i8[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> i8[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl i8x16[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i8x16[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i8x16[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl i8x16[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl i8x16[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i8x16[src]

pub fn eq(self, other: Self) -> m8x16[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m8x16[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m8x16[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m8x16[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m8x16[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m8x16[src]

Lane-wise greater-than-or-equals comparison.

+

impl i8x16[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x16>[src]

Returns a wrapper that implements PartialOrd.

+

impl i8x16[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x16>[src]

Returns a wrapper that implements Ord.

+

impl i8x16[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+
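Unlike the 4-lane case earlier in this section, i8x16 has 16 lanes, so bitmask fills all 16 bits of the returned u16; a brief sketch under the same lane-0-to-LSB assumption:

let v = i8x16::splat(-1); // every lane has its MSB set
assert_eq!(v.bitmask(), 0xFFFF);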

Trait Implementations

impl Add<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i8> for i8x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i8; 16]>> for i8x16[src]

impl AddAssign<i8> for i8x16[src]

impl Binary for i8x16[src]

impl BitAnd<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i8> for i8x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i8; 16]>> for i8x16[src]

impl BitAndAssign<i8> for i8x16[src]

impl BitOr<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i8> for i8x16[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i8; 16]>> for i8x16[src]

impl BitOrAssign<i8> for i8x16[src]

impl BitXor<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i8> for i8x16[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i8; 16]>> for i8x16[src]

impl BitXorAssign<i8> for i8x16[src]

impl Debug for i8x16[src]

impl Default for i8x16[src]

impl Div<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i8> for i8x16[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i8; 16]>> for i8x16[src]

impl DivAssign<i8> for i8x16[src]

impl Eq for i8x16[src]

impl From<[i8; 16]> for i8x16[src]

impl FromBits<Simd<[f32; 4]>> for i8x16[src]

impl FromBits<Simd<[f64; 2]>> for i8x16[src]

impl FromBits<Simd<[i128; 1]>> for i8x16[src]

impl FromBits<Simd<[i16; 8]>> for i8x16[src]

impl FromBits<Simd<[i32; 4]>> for i8x16[src]

impl FromBits<Simd<[i64; 2]>> for i8x16[src]

impl FromBits<Simd<[m128; 1]>> for i8x16[src]

impl FromBits<Simd<[m16; 8]>> for i8x16[src]

impl FromBits<Simd<[m32; 4]>> for i8x16[src]

impl FromBits<Simd<[m64; 2]>> for i8x16[src]

impl FromBits<Simd<[m8; 16]>> for i8x16[src]

impl FromBits<Simd<[u128; 1]>> for i8x16[src]

impl FromBits<Simd<[u16; 8]>> for i8x16[src]

impl FromBits<Simd<[u32; 4]>> for i8x16[src]

impl FromBits<Simd<[u64; 2]>> for i8x16[src]

impl FromBits<Simd<[u8; 16]>> for i8x16[src]

impl FromBits<__m128> for i8x16[src]

impl FromBits<__m128d> for i8x16[src]

impl FromBits<__m128i> for i8x16[src]

impl FromCast<Simd<[f32; 16]>> for i8x16[src]

impl FromCast<Simd<[i16; 16]>> for i8x16[src]

impl FromCast<Simd<[i32; 16]>> for i8x16[src]

impl FromCast<Simd<[m16; 16]>> for i8x16[src]

impl FromCast<Simd<[m32; 16]>> for i8x16[src]

impl FromCast<Simd<[m8; 16]>> for i8x16[src]

impl FromCast<Simd<[u16; 16]>> for i8x16[src]

impl FromCast<Simd<[u32; 16]>> for i8x16[src]

impl FromCast<Simd<[u8; 16]>> for i8x16[src]

impl Hash for i8x16[src]

impl LowerHex for i8x16[src]

impl Mul<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i8> for i8x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i8; 16]>> for i8x16[src]

impl MulAssign<i8> for i8x16[src]

impl Neg for i8x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i8x16[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i8x16[src]

impl PartialEq<Simd<[i8; 16]>> for i8x16[src]

impl<'a> Product<&'a Simd<[i8; 16]>> for i8x16[src]

impl Product<Simd<[i8; 16]>> for i8x16[src]

impl Rem<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i8> for i8x16[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i8; 16]>> for i8x16[src]

impl RemAssign<i8> for i8x16[src]

impl Shl<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i8x16[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i8; 16]>> for i8x16[src]

impl ShlAssign<u32> for i8x16[src]

impl Shr<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i8x16[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i8; 16]>> for i8x16[src]

impl ShrAssign<u32> for i8x16[src]

impl Simd for i8x16[src]

type Element = i8

Element type of the SIMD vector

+

type LanesType = [u32; 16]

The type: [u32; Self::N].

+

impl Sub<Simd<[i8; 16]>> for i8x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i8> for i8x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i8; 16]>> for i8x16[src]

impl SubAssign<i8> for i8x16[src]

impl<'a> Sum<&'a Simd<[i8; 16]>> for i8x16[src]

impl Sum<Simd<[i8; 16]>> for i8x16[src]

impl UpperHex for i8x16[src]

\ No newline at end of file
diff --git a/packed_simd/type.i8x2.html b/packed_simd/type.i8x2.html
new file mode 100644
index 000000000..d8fd5bae7
--- /dev/null
+++ b/packed_simd/type.i8x2.html
@@ -0,0 +1,233 @@
+packed_simd::i8x2 - Rust

[][src]Type Definition packed_simd::i8x2

type i8x2 = Simd<[i8; 2]>;

A 16-bit vector with 2 i8 lanes.

+

Implementations

impl i8x2[src]

pub const fn new(x0: i8, x1: i8) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = + "replace does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = + "replace_unchecked does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl i8x2[src]

pub fn rotate_left(self, n: i8x2) -> i8x2[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: i8x2) -> i8x2[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl i8x2[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl i8x2[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows it returns the mathematical result +modulo 2^n where n is the number of times it overflows.

+

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows it returns the mathematical result +modulo 2^n where n is the number of times it overflows.

+

impl i8x2[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

+

pub fn min_element(self) -> i8[src]

Smallest vector element value.

+

impl i8x2[src]

pub fn and(self) -> i8[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> i8[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> i8[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl i8x2[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i8x2[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i8x2[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl i8x2[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl i8x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i8x2[src]

pub fn eq(self, other: Self) -> m8x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m8x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m8x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m8x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m8x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m8x2[src]

Lane-wise greater-than-or-equals comparison.

+

impl i8x2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x2>[src]

Returns a wrapper that implements PartialOrd.

+

impl i8x2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x2>[src]

Returns a wrapper that implements Ord.

+

impl i8x2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i8> for i8x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i8; 2]>> for i8x2[src]

impl AddAssign<i8> for i8x2[src]

impl Binary for i8x2[src]

impl BitAnd<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i8> for i8x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i8; 2]>> for i8x2[src]

impl BitAndAssign<i8> for i8x2[src]

impl BitOr<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i8> for i8x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i8; 2]>> for i8x2[src]

impl BitOrAssign<i8> for i8x2[src]

impl BitXor<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i8> for i8x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i8; 2]>> for i8x2[src]

impl BitXorAssign<i8> for i8x2[src]

impl Debug for i8x2[src]

impl Default for i8x2[src]

impl Div<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i8> for i8x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i8; 2]>> for i8x2[src]

impl DivAssign<i8> for i8x2[src]

impl Eq for i8x2[src]

impl From<[i8; 2]> for i8x2[src]

impl FromBits<Simd<[m8; 2]>> for i8x2[src]

impl FromBits<Simd<[u8; 2]>> for i8x2[src]

impl FromCast<Simd<[f32; 2]>> for i8x2[src]

impl FromCast<Simd<[f64; 2]>> for i8x2[src]

impl FromCast<Simd<[i128; 2]>> for i8x2[src]

impl FromCast<Simd<[i16; 2]>> for i8x2[src]

impl FromCast<Simd<[i32; 2]>> for i8x2[src]

impl FromCast<Simd<[i64; 2]>> for i8x2[src]

impl FromCast<Simd<[isize; 2]>> for i8x2[src]

impl FromCast<Simd<[m128; 2]>> for i8x2[src]

impl FromCast<Simd<[m16; 2]>> for i8x2[src]

impl FromCast<Simd<[m32; 2]>> for i8x2[src]

impl FromCast<Simd<[m64; 2]>> for i8x2[src]

impl FromCast<Simd<[m8; 2]>> for i8x2[src]

impl FromCast<Simd<[msize; 2]>> for i8x2[src]

impl FromCast<Simd<[u128; 2]>> for i8x2[src]

impl FromCast<Simd<[u16; 2]>> for i8x2[src]

impl FromCast<Simd<[u32; 2]>> for i8x2[src]

impl FromCast<Simd<[u64; 2]>> for i8x2[src]

impl FromCast<Simd<[u8; 2]>> for i8x2[src]

impl FromCast<Simd<[usize; 2]>> for i8x2[src]

impl Hash for i8x2[src]

impl LowerHex for i8x2[src]

impl Mul<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i8> for i8x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i8; 2]>> for i8x2[src]

impl MulAssign<i8> for i8x2[src]

impl Neg for i8x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i8x2[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i8x2[src]

impl PartialEq<Simd<[i8; 2]>> for i8x2[src]

impl<'a> Product<&'a Simd<[i8; 2]>> for i8x2[src]

impl Product<Simd<[i8; 2]>> for i8x2[src]

impl Rem<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i8> for i8x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i8; 2]>> for i8x2[src]

impl RemAssign<i8> for i8x2[src]

impl Shl<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i8x2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i8; 2]>> for i8x2[src]

impl ShlAssign<u32> for i8x2[src]

impl Shr<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i8x2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i8; 2]>> for i8x2[src]

impl ShrAssign<u32> for i8x2[src]

impl Simd for i8x2[src]

type Element = i8

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+

impl Sub<Simd<[i8; 2]>> for i8x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i8> for i8x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i8; 2]>> for i8x2[src]

impl SubAssign<i8> for i8x2[src]

impl<'a> Sum<&'a Simd<[i8; 2]>> for i8x2[src]

impl Sum<Simd<[i8; 2]>> for i8x2[src]

impl UpperHex for i8x2[src]

\ No newline at end of file
diff --git a/packed_simd/type.i8x32.html b/packed_simd/type.i8x32.html
new file mode 100644
index 000000000..863109b1e
--- /dev/null
+++ b/packed_simd/type.i8x32.html
@@ -0,0 +1,236 @@
+packed_simd::i8x32 - Rust

[][src]Type Definition packed_simd::i8x32

type i8x32 = Simd<[i8; 32]>;

A 256-bit vector with 32 i8 lanes.

+

Implementations

impl i8x32[src]

pub const fn new(
    x0: i8,
    x1: i8,
    x2: i8,
    x3: i8,
    x4: i8,
    x5: i8,
    x6: i8,
    x7: i8,
    x8: i8,
    x9: i8,
    x10: i8,
    x11: i8,
    x12: i8,
    x13: i8,
    x14: i8,
    x15: i8,
    x16: i8,
    x17: i8,
    x18: i8,
    x19: i8,
    x20: i8,
    x21: i8,
    x22: i8,
    x23: i8,
    x24: i8,
    x25: i8,
    x26: i8,
    x27: i8,
    x28: i8,
    x29: i8,
    x30: i8,
    x31: i8
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = + "replace does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = + "replace_unchecked does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+
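A minimal usage sketch of the accessors above (assuming the packed_simd crate as a dependency on a nightly toolchain):

use packed_simd::i8x32;

fn main() {
    // All 32 lanes start out as 1.
    let v = i8x32::splat(1);
    assert_eq!(v.extract(0), 1);
    // replace returns a new vector; `v` itself is unchanged.
    let w = v.replace(3, 42);
    assert_eq!(w.extract(3), 42);
    assert_eq!(v.extract(3), 1);
}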

impl i8x32[src]

pub fn rotate_left(self, n: i8x32) -> i8x32[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i8x32) -> i8x32[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+
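A sketch of the difference from << (same assumptions as the example above):

use packed_simd::i8x32;

fn main() {
    let x = i8x32::splat(-128); // 0b1000_0000 in every lane
    // Rotating left by one wraps the shifted-out MSB into the LSB...
    assert_eq!(x.rotate_left(i8x32::splat(1)), i8x32::splat(1));
    // ...whereas << simply discards it.
    assert_eq!(x << 1, i8x32::splat(0));
}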

impl i8x32[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl i8x32[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+
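For instance (same assumptions), summing 32 lanes of 8 overflows i8 and wraps modulo 2^8:

use packed_simd::i8x32;

fn main() {
    // 32 * 8 = 256, which wraps to 0 in i8 arithmetic.
    assert_eq!(i8x32::splat(8).wrapping_sum(), 0);
}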

impl i8x32[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

+

pub fn min_element(self) -> i8[src]

Smallest vector element value.

+

impl i8x32[src]

pub fn and(self) -> i8[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

+

impl i8x32[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i8x32[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
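A round trip through a slice might look like this (same assumptions; the unaligned variants sidestep the alignment requirement):

use packed_simd::i8x32;

fn main() {
    let src: Vec<i8> = (0..32).map(|i| i as i8).collect();
    let v = i8x32::from_slice_unaligned(&src);
    let mut dst = vec![0i8; 32];
    v.write_to_slice_unaligned(&mut dst);
    assert_eq!(src, dst);
}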

impl i8x32[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl i8x32[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl i8x32[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i8x32[src]

pub fn eq(self, other: Self) -> m8x32[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m8x32[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m8x32[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m8x32[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m8x32[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m8x32[src]

Lane-wise greater-than-or-equals comparison.

+

impl i8x32[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x32>[src]

Returns a wrapper that implements PartialOrd.

+

impl i8x32[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x32>[src]

Returns a wrapper that implements Ord.

+

impl i8x32[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+
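For example (same assumptions; that lane 0 maps to the least significant bit of the mask is an assumption of this sketch):

use packed_simd::i8x32;

fn main() {
    // Only lane 0 is negative, i.e. only lane 0 has its MSB set.
    let v = i8x32::splat(1).replace(0, -1);
    assert_eq!(v.bitmask(), 0b1);
}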

Trait Implementations

impl Add<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i8> for i8x32[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i8; 32]>> for i8x32[src]

impl AddAssign<i8> for i8x32[src]

impl Binary for i8x32[src]

impl BitAnd<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i8> for i8x32[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i8; 32]>> for i8x32[src]

impl BitAndAssign<i8> for i8x32[src]

impl BitOr<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i8> for i8x32[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i8; 32]>> for i8x32[src]

impl BitOrAssign<i8> for i8x32[src]

impl BitXor<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i8> for i8x32[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i8; 32]>> for i8x32[src]

impl BitXorAssign<i8> for i8x32[src]

impl Debug for i8x32[src]

impl Default for i8x32[src]

impl Div<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i8> for i8x32[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i8; 32]>> for i8x32[src]

impl DivAssign<i8> for i8x32[src]

impl Eq for i8x32[src]

impl From<[i8; 32]> for i8x32[src]

impl FromBits<Simd<[f32; 8]>> for i8x32[src]

impl FromBits<Simd<[f64; 4]>> for i8x32[src]

impl FromBits<Simd<[i128; 2]>> for i8x32[src]

impl FromBits<Simd<[i16; 16]>> for i8x32[src]

impl FromBits<Simd<[i32; 8]>> for i8x32[src]

impl FromBits<Simd<[i64; 4]>> for i8x32[src]

impl FromBits<Simd<[m128; 2]>> for i8x32[src]

impl FromBits<Simd<[m16; 16]>> for i8x32[src]

impl FromBits<Simd<[m32; 8]>> for i8x32[src]

impl FromBits<Simd<[m64; 4]>> for i8x32[src]

impl FromBits<Simd<[m8; 32]>> for i8x32[src]

impl FromBits<Simd<[u128; 2]>> for i8x32[src]

impl FromBits<Simd<[u16; 16]>> for i8x32[src]

impl FromBits<Simd<[u32; 8]>> for i8x32[src]

impl FromBits<Simd<[u64; 4]>> for i8x32[src]

impl FromBits<Simd<[u8; 32]>> for i8x32[src]

impl FromBits<__m256> for i8x32[src]

impl FromBits<__m256d> for i8x32[src]

impl FromBits<__m256i> for i8x32[src]

impl FromCast<Simd<[i16; 32]>> for i8x32[src]

impl FromCast<Simd<[m16; 32]>> for i8x32[src]

impl FromCast<Simd<[m8; 32]>> for i8x32[src]

impl FromCast<Simd<[u16; 32]>> for i8x32[src]

impl FromCast<Simd<[u8; 32]>> for i8x32[src]

impl Hash for i8x32[src]

impl LowerHex for i8x32[src]

impl Mul<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i8> for i8x32[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i8; 32]>> for i8x32[src]

impl MulAssign<i8> for i8x32[src]

impl Neg for i8x32[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i8x32[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i8x32[src]

impl PartialEq<Simd<[i8; 32]>> for i8x32[src]

impl<'a> Product<&'a Simd<[i8; 32]>> for i8x32[src]

impl Product<Simd<[i8; 32]>> for i8x32[src]

impl Rem<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i8> for i8x32[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i8; 32]>> for i8x32[src]

impl RemAssign<i8> for i8x32[src]

impl Shl<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i8x32[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i8; 32]>> for i8x32[src]

impl ShlAssign<u32> for i8x32[src]

impl Shr<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i8x32[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i8; 32]>> for i8x32[src]

impl ShrAssign<u32> for i8x32[src]

impl Simd for i8x32[src]

type Element = i8

Element type of the SIMD vector

+

type LanesType = [u32; 32]

The type: [u32; Self::N].

+

impl Sub<Simd<[i8; 32]>> for i8x32[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i8> for i8x32[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i8; 32]>> for i8x32[src]

impl SubAssign<i8> for i8x32[src]

impl<'a> Sum<&'a Simd<[i8; 32]>> for i8x32[src]

impl Sum<Simd<[i8; 32]>> for i8x32[src]

impl UpperHex for i8x32[src]

\ No newline at end of file
diff --git a/packed_simd/type.i8x4.html b/packed_simd/type.i8x4.html
new file mode 100644
index 000000000..f37b8f579
--- /dev/null
+++ b/packed_simd/type.i8x4.html
@@ -0,0 +1,236 @@
packed_simd::i8x4 - Rust

[][src]Type Definition packed_simd::i8x4

type i8x4 = Simd<[i8; 4]>;

A 32-bit vector with 4 i8 lanes.

+

Implementations

impl i8x4[src]

pub const fn new(x0: i8, x1: i8, x2: i8, x3: i8) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = + "replace does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = + "replace_unchecked does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl i8x4[src]

pub fn rotate_left(self, n: i8x4) -> i8x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i8x4) -> i8x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl i8x4[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

+
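A lane-wise sketch (assuming packed_simd on a nightly toolchain):

use packed_simd::i8x4;

fn main() {
    let a = i8x4::new(1, 5, -3, 7);
    let b = i8x4::new(2, 4, -6, 7);
    // Each output lane takes the smaller (or larger) of the two inputs.
    assert_eq!(a.min(b), i8x4::new(1, 4, -6, 7));
    assert_eq!(a.max(b), i8x4::new(2, 5, -3, 7));
}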

impl i8x4[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

impl i8x4[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

+

pub fn min_element(self) -> i8[src]

Smallest vector element value.

+

impl i8x4[src]

pub fn and(self) -> i8[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

+
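For example (same assumptions), these reductions fold every lane into a single scalar:

use packed_simd::i8x4;

fn main() {
    let v = i8x4::new(0b1100, 0b1010, 0b1111, 0b1001);
    assert_eq!(v.and(), 0b1000); // bits set in every lane
    assert_eq!(v.or(), 0b1111);  // bits set in any lane
    assert_eq!(v.xor(), 0);      // parity of each bit across lanes
}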

impl i8x4[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i8x4[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i8x4[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl i8x4[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl i8x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i8x4[src]

pub fn eq(self, other: Self) -> m8x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m8x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m8x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m8x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m8x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m8x4[src]

Lane-wise greater-than-or-equals comparison.

+
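A comparison yields a mask vector with one boolean lane per input lane (same assumptions; m8x4::new taking one bool per lane is part of the crate's mask API):

use packed_simd::{i8x4, m8x4};

fn main() {
    let a = i8x4::new(1, 2, 3, 4);
    let b = i8x4::new(4, 3, 2, 1);
    // Lanes 0 and 1 satisfy a < b; lanes 2 and 3 do not.
    assert_eq!(a.lt(b), m8x4::new(true, true, false, false));
}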

impl i8x4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl i8x4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x4>[src]

Returns a wrapper that implements Ord.

+

impl i8x4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

Trait Implementations

impl Add<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i8> for i8x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i8; 4]>> for i8x4[src]

impl AddAssign<i8> for i8x4[src]

impl Binary for i8x4[src]

impl BitAnd<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i8> for i8x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i8; 4]>> for i8x4[src]

impl BitAndAssign<i8> for i8x4[src]

impl BitOr<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i8> for i8x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i8; 4]>> for i8x4[src]

impl BitOrAssign<i8> for i8x4[src]

impl BitXor<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i8> for i8x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i8; 4]>> for i8x4[src]

impl BitXorAssign<i8> for i8x4[src]

impl Debug for i8x4[src]

impl Default for i8x4[src]

impl Div<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i8> for i8x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i8; 4]>> for i8x4[src]

impl DivAssign<i8> for i8x4[src]

impl Eq for i8x4[src]

impl From<[i8; 4]> for i8x4[src]

impl FromBits<Simd<[i16; 2]>> for i8x4[src]

impl FromBits<Simd<[m16; 2]>> for i8x4[src]

impl FromBits<Simd<[m8; 4]>> for i8x4[src]

impl FromBits<Simd<[u16; 2]>> for i8x4[src]

impl FromBits<Simd<[u8; 4]>> for i8x4[src]

impl FromCast<Simd<[f32; 4]>> for i8x4[src]

impl FromCast<Simd<[f64; 4]>> for i8x4[src]

impl FromCast<Simd<[i128; 4]>> for i8x4[src]

impl FromCast<Simd<[i16; 4]>> for i8x4[src]

impl FromCast<Simd<[i32; 4]>> for i8x4[src]

impl FromCast<Simd<[i64; 4]>> for i8x4[src]

impl FromCast<Simd<[isize; 4]>> for i8x4[src]

impl FromCast<Simd<[m128; 4]>> for i8x4[src]

impl FromCast<Simd<[m16; 4]>> for i8x4[src]

impl FromCast<Simd<[m32; 4]>> for i8x4[src]

impl FromCast<Simd<[m64; 4]>> for i8x4[src]

impl FromCast<Simd<[m8; 4]>> for i8x4[src]

impl FromCast<Simd<[msize; 4]>> for i8x4[src]

impl FromCast<Simd<[u128; 4]>> for i8x4[src]

impl FromCast<Simd<[u16; 4]>> for i8x4[src]

impl FromCast<Simd<[u32; 4]>> for i8x4[src]

impl FromCast<Simd<[u64; 4]>> for i8x4[src]

impl FromCast<Simd<[u8; 4]>> for i8x4[src]

impl FromCast<Simd<[usize; 4]>> for i8x4[src]

impl Hash for i8x4[src]

impl LowerHex for i8x4[src]

impl Mul<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i8> for i8x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i8; 4]>> for i8x4[src]

impl MulAssign<i8> for i8x4[src]

impl Neg for i8x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i8x4[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i8x4[src]

impl PartialEq<Simd<[i8; 4]>> for i8x4[src]

impl<'a> Product<&'a Simd<[i8; 4]>> for i8x4[src]

impl Product<Simd<[i8; 4]>> for i8x4[src]

impl Rem<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i8> for i8x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i8; 4]>> for i8x4[src]

impl RemAssign<i8> for i8x4[src]

impl Shl<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i8x4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i8; 4]>> for i8x4[src]

impl ShlAssign<u32> for i8x4[src]

impl Shr<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i8x4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i8; 4]>> for i8x4[src]

impl ShrAssign<u32> for i8x4[src]

impl Simd for i8x4[src]

type Element = i8

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+

impl Sub<Simd<[i8; 4]>> for i8x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i8> for i8x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i8; 4]>> for i8x4[src]

impl SubAssign<i8> for i8x4[src]

impl<'a> Sum<&'a Simd<[i8; 4]>> for i8x4[src]

impl Sum<Simd<[i8; 4]>> for i8x4[src]

impl UpperHex for i8x4[src]

\ No newline at end of file
diff --git a/packed_simd/type.i8x64.html b/packed_simd/type.i8x64.html
new file mode 100644
index 000000000..0e029f7e4
--- /dev/null
+++ b/packed_simd/type.i8x64.html
@@ -0,0 +1,230 @@
packed_simd::i8x64 - Rust

[][src]Type Definition packed_simd::i8x64

type i8x64 = Simd<[i8; 64]>;

A 512-bit vector with 64 i8 lanes.

+

Implementations

impl i8x64[src]

pub const fn new(
    x0: i8,
    x1: i8,
    x2: i8,
    x3: i8,
    x4: i8,
    x5: i8,
    x6: i8,
    x7: i8,
    x8: i8,
    x9: i8,
    x10: i8,
    x11: i8,
    x12: i8,
    x13: i8,
    x14: i8,
    x15: i8,
    x16: i8,
    x17: i8,
    x18: i8,
    x19: i8,
    x20: i8,
    x21: i8,
    x22: i8,
    x23: i8,
    x24: i8,
    x25: i8,
    x26: i8,
    x27: i8,
    x28: i8,
    x29: i8,
    x30: i8,
    x31: i8,
    x32: i8,
    x33: i8,
    x34: i8,
    x35: i8,
    x36: i8,
    x37: i8,
    x38: i8,
    x39: i8,
    x40: i8,
    x41: i8,
    x42: i8,
    x43: i8,
    x44: i8,
    x45: i8,
    x46: i8,
    x47: i8,
    x48: i8,
    x49: i8,
    x50: i8,
    x51: i8,
    x52: i8,
    x53: i8,
    x54: i8,
    x55: i8,
    x56: i8,
    x57: i8,
    x58: i8,
    x59: i8,
    x60: i8,
    x61: i8,
    x62: i8,
    x63: i8
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = + "replace does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = + "replace_unchecked does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl i8x64[src]

pub fn rotate_left(self, n: i8x64) -> i8x64[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i8x64) -> i8x64[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl i8x64[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl i8x64[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

impl i8x64[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

+

pub fn min_element(self) -> i8[src]

Smallest vector element value.

+

impl i8x64[src]

pub fn and(self) -> i8[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

+

impl i8x64[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i8x64[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i8x64[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

+
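A quick sketch (assuming packed_simd on a nightly toolchain): converting to a given endianness and back is the identity on any target:

use packed_simd::i8x64;

fn main() {
    let v = i8x64::splat(0).replace(0, 1);
    assert_eq!(i8x64::from_le(v.to_le()), v);
    assert_eq!(i8x64::from_be(v.to_be()), v);
}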

impl i8x64[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+
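For example (same assumptions), each count is computed independently per lane:

use packed_simd::i8x64;

fn main() {
    let v = i8x64::splat(0b0000_0110);
    assert_eq!(v.count_ones(), i8x64::splat(2));
    assert_eq!(v.trailing_zeros(), i8x64::splat(1));
    assert_eq!(v.leading_zeros(), i8x64::splat(5));
}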

impl i8x64[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i8x64[src]

pub fn eq(self, other: Self) -> m8x64[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m8x64[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m8x64[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m8x64[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m8x64[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m8x64[src]

Lane-wise greater-than-or-equals comparison.

+

impl i8x64[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x64>[src]

Returns a wrapper that implements PartialOrd.

+

impl i8x64[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x64>[src]

Returns a wrapper that implements Ord.

+

impl i8x64[src]

pub fn bitmask(self) -> u64[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

Trait Implementations

impl Add<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i8> for i8x64[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i8; 64]>> for i8x64[src]

impl AddAssign<i8> for i8x64[src]

impl Binary for i8x64[src]

impl BitAnd<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i8> for i8x64[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i8; 64]>> for i8x64[src]

impl BitAndAssign<i8> for i8x64[src]

impl BitOr<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i8> for i8x64[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i8; 64]>> for i8x64[src]

impl BitOrAssign<i8> for i8x64[src]

impl BitXor<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i8> for i8x64[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i8; 64]>> for i8x64[src]

impl BitXorAssign<i8> for i8x64[src]

impl Debug for i8x64[src]

impl Default for i8x64[src]

impl Div<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i8> for i8x64[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i8; 64]>> for i8x64[src]

impl DivAssign<i8> for i8x64[src]

impl Eq for i8x64[src]

impl From<[i8; 64]> for i8x64[src]

impl FromBits<Simd<[f32; 16]>> for i8x64[src]

impl FromBits<Simd<[f64; 8]>> for i8x64[src]

impl FromBits<Simd<[i128; 4]>> for i8x64[src]

impl FromBits<Simd<[i16; 32]>> for i8x64[src]

impl FromBits<Simd<[i32; 16]>> for i8x64[src]

impl FromBits<Simd<[i64; 8]>> for i8x64[src]

impl FromBits<Simd<[m128; 4]>> for i8x64[src]

impl FromBits<Simd<[m16; 32]>> for i8x64[src]

impl FromBits<Simd<[m32; 16]>> for i8x64[src]

impl FromBits<Simd<[m64; 8]>> for i8x64[src]

impl FromBits<Simd<[m8; 64]>> for i8x64[src]

impl FromBits<Simd<[u128; 4]>> for i8x64[src]

impl FromBits<Simd<[u16; 32]>> for i8x64[src]

impl FromBits<Simd<[u32; 16]>> for i8x64[src]

impl FromBits<Simd<[u64; 8]>> for i8x64[src]

impl FromBits<Simd<[u8; 64]>> for i8x64[src]

impl FromCast<Simd<[m8; 64]>> for i8x64[src]

impl FromCast<Simd<[u8; 64]>> for i8x64[src]

impl Hash for i8x64[src]

impl LowerHex for i8x64[src]

impl Mul<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i8> for i8x64[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i8; 64]>> for i8x64[src]

impl MulAssign<i8> for i8x64[src]

impl Neg for i8x64[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i8x64[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i8x64[src]

impl PartialEq<Simd<[i8; 64]>> for i8x64[src]

impl<'a> Product<&'a Simd<[i8; 64]>> for i8x64[src]

impl Product<Simd<[i8; 64]>> for i8x64[src]

impl Rem<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i8> for i8x64[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i8; 64]>> for i8x64[src]

impl RemAssign<i8> for i8x64[src]

impl Shl<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i8x64[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i8; 64]>> for i8x64[src]

impl ShlAssign<u32> for i8x64[src]

impl Shr<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i8x64[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i8; 64]>> for i8x64[src]

impl ShrAssign<u32> for i8x64[src]

impl Simd for i8x64[src]

type Element = i8

Element type of the SIMD vector

+

type LanesType = [u32; 64]

The type: [u32; Self::N].

+

impl Sub<Simd<[i8; 64]>> for i8x64[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i8> for i8x64[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i8; 64]>> for i8x64[src]

impl SubAssign<i8> for i8x64[src]

impl<'a> Sum<&'a Simd<[i8; 64]>> for i8x64[src]

impl Sum<Simd<[i8; 64]>> for i8x64[src]

impl UpperHex for i8x64[src]

\ No newline at end of file
diff --git a/packed_simd/type.i8x8.html b/packed_simd/type.i8x8.html
new file mode 100644
index 000000000..b735490ee
--- /dev/null
+++ b/packed_simd/type.i8x8.html
@@ -0,0 +1,238 @@
packed_simd::i8x8 - Rust

[][src]Type Definition packed_simd::i8x8

type i8x8 = Simd<[i8; 8]>;

A 64-bit vector with 8 i8 lanes.

+

Implementations

impl i8x8[src]

pub const fn new(
    x0: i8,
    x1: i8,
    x2: i8,
    x3: i8,
    x4: i8,
    x5: i8,
    x6: i8,
    x7: i8
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: i8) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> i8[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> i8[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = + "replace does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = + "replace_unchecked does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: i8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+
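A small constructor sketch (assuming packed_simd on a nightly toolchain):

use packed_simd::i8x8;

fn main() {
    let v = i8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
    assert_eq!(i8x8::lanes(), 8);
    assert_eq!(v.extract(6), 6);
}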

impl i8x8[src]

pub fn rotate_left(self, n: i8x8) -> i8x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: i8x8) -> i8x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl i8x8[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl i8x8[src]

pub fn wrapping_sum(self) -> i8[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> i8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

impl i8x8[src]

pub fn max_element(self) -> i8[src]

Largest vector element value.

+

pub fn min_element(self) -> i8[src]

Smallest vector element value.

+
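For instance (same assumptions), the element extrema reduce the whole vector to one scalar:

use packed_simd::i8x8;

fn main() {
    let v = i8x8::new(3, -1, 4, 1, 5, -9, 2, 6);
    assert_eq!(v.max_element(), 6);
    assert_eq!(v.min_element(), -9);
}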

impl i8x8[src]

pub fn and(self) -> i8[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> i8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> i8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

+

impl i8x8[src]

pub fn from_slice_aligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[i8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i8x8[src]

pub fn write_to_slice_aligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [i8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl i8x8[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl i8x8[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl i8x8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl i8x8[src]

pub fn eq(self, other: Self) -> m8x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m8x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m8x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m8x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m8x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m8x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl i8x8[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<i8x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl i8x8[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<i8x8>[src]

Returns a wrapper that implements Ord.

+
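These wrappers make whole vectors comparable, e.g. for sorting (same assumptions); the first differing lane decides:

use packed_simd::i8x8;

fn main() {
    let a = i8x8::new(1, 2, 3, 4, 5, 6, 7, 8);
    let b = i8x8::new(1, 2, 3, 4, 5, 6, 7, 9);
    assert!(a.lex_ord() < b.lex_ord());
}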

impl i8x8[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

Trait Implementations

impl Add<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<i8> for i8x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[i8; 8]>> for i8x8[src]

impl AddAssign<i8> for i8x8[src]

impl Binary for i8x8[src]

impl BitAnd<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<i8> for i8x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[i8; 8]>> for i8x8[src]

impl BitAndAssign<i8> for i8x8[src]

impl BitOr<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<i8> for i8x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[i8; 8]>> for i8x8[src]

impl BitOrAssign<i8> for i8x8[src]

impl BitXor<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<i8> for i8x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[i8; 8]>> for i8x8[src]

impl BitXorAssign<i8> for i8x8[src]

impl Debug for i8x8[src]

impl Default for i8x8[src]

impl Div<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<i8> for i8x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[i8; 8]>> for i8x8[src]

impl DivAssign<i8> for i8x8[src]

impl Eq for i8x8[src]

impl From<[i8; 8]> for i8x8[src]

impl FromBits<Simd<[f32; 2]>> for i8x8[src]

impl FromBits<Simd<[i16; 4]>> for i8x8[src]

impl FromBits<Simd<[i32; 2]>> for i8x8[src]

impl FromBits<Simd<[m16; 4]>> for i8x8[src]

impl FromBits<Simd<[m32; 2]>> for i8x8[src]

impl FromBits<Simd<[m8; 8]>> for i8x8[src]

impl FromBits<Simd<[u16; 4]>> for i8x8[src]

impl FromBits<Simd<[u32; 2]>> for i8x8[src]

impl FromBits<Simd<[u8; 8]>> for i8x8[src]

impl FromBits<__m64> for i8x8[src]

impl FromCast<Simd<[f32; 8]>> for i8x8[src]

impl FromCast<Simd<[f64; 8]>> for i8x8[src]

impl FromCast<Simd<[i16; 8]>> for i8x8[src]

impl FromCast<Simd<[i32; 8]>> for i8x8[src]

impl FromCast<Simd<[i64; 8]>> for i8x8[src]

impl FromCast<Simd<[isize; 8]>> for i8x8[src]

impl FromCast<Simd<[m16; 8]>> for i8x8[src]

impl FromCast<Simd<[m32; 8]>> for i8x8[src]

impl FromCast<Simd<[m64; 8]>> for i8x8[src]

impl FromCast<Simd<[m8; 8]>> for i8x8[src]

impl FromCast<Simd<[msize; 8]>> for i8x8[src]

impl FromCast<Simd<[u16; 8]>> for i8x8[src]

impl FromCast<Simd<[u32; 8]>> for i8x8[src]

impl FromCast<Simd<[u64; 8]>> for i8x8[src]

impl FromCast<Simd<[u8; 8]>> for i8x8[src]

impl FromCast<Simd<[usize; 8]>> for i8x8[src]

impl Hash for i8x8[src]

impl LowerHex for i8x8[src]

impl Mul<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<i8> for i8x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[i8; 8]>> for i8x8[src]

impl MulAssign<i8> for i8x8[src]

impl Neg for i8x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for i8x8[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for i8x8[src]

impl PartialEq<Simd<[i8; 8]>> for i8x8[src]

impl<'a> Product<&'a Simd<[i8; 8]>> for i8x8[src]

impl Product<Simd<[i8; 8]>> for i8x8[src]

impl Rem<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<i8> for i8x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[i8; 8]>> for i8x8[src]

impl RemAssign<i8> for i8x8[src]

impl Shl<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for i8x8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[i8; 8]>> for i8x8[src]

impl ShlAssign<u32> for i8x8[src]

impl Shr<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for i8x8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[i8; 8]>> for i8x8[src]

impl ShrAssign<u32> for i8x8[src]

impl Simd for i8x8[src]

type Element = i8

Element type of the SIMD vector

+

type LanesType = [u32; 8]

The type: [u32; Self::N].

+

impl Sub<Simd<[i8; 8]>> for i8x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<i8> for i8x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[i8; 8]>> for i8x8[src]

impl SubAssign<i8> for i8x8[src]

impl<'a> Sum<&'a Simd<[i8; 8]>> for i8x8[src]

impl Sum<Simd<[i8; 8]>> for i8x8[src]

impl UpperHex for i8x8[src]

\ No newline at end of file
diff --git a/packed_simd/type.isizex2.html b/packed_simd/type.isizex2.html
new file mode 100644
index 000000000..3fbe1bb91
--- /dev/null
+++ b/packed_simd/type.isizex2.html
@@ -0,0 +1,231 @@
packed_simd::isizex2 - Rust

[][src]Type Definition packed_simd::isizex2

type isizex2 = Simd<[isize; 2]>;

A vector with 2 isize lanes.

+

Implementations

impl isizex2[src]

pub const fn new(x0: isize, x1: isize) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: isize) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> isize[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> isize[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = + "replace does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = + "replace_unchecked does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl isizex2[src]

pub fn rotate_left(self, n: isizex2) -> isizex2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: isizex2) -> isizex2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl isizex2[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl isizex2[src]

pub fn wrapping_sum(self) -> isize[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

pub fn wrapping_product(self) -> isize[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+
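For instance (assuming packed_simd on a nightly toolchain), with only two lanes each reduction is a single operation:

use packed_simd::isizex2;

fn main() {
    let v = isizex2::new(3, -4);
    assert_eq!(v.wrapping_sum(), -1);
    assert_eq!(v.wrapping_product(), -12);
}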

impl isizex2[src]

pub fn max_element(self) -> isize[src]

Largest vector element value.

+

pub fn min_element(self) -> isize[src]

Smallest vector element value.

+

impl isizex2[src]

pub fn and(self) -> isize[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> isize[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> isize[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl isizex2[src]

pub fn from_slice_aligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.
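
Example

A minimal sketch; the unaligned variant only requires enough elements, while the aligned variants additionally require &slice[0] to sit on an align_of::<Self>() boundary:

use packed_simd::isizex2;

let data: [isize; 4] = [1, 2, 3, 4];
let v = isizex2::from_slice_unaligned(&data[1..3]);
assert_eq!(v, isizex2::new(2, 3));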

+

impl isizex2[src]

pub fn write_to_slice_aligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl isizex2[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.
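
Example

A minimal sketch: swap_bytes is its own inverse, and on a little-endian target to_le is a no-op while to_be swaps the bytes:

use packed_simd::isizex2;

let v = isizex2::new(1, 2);
assert_eq!(v.swap_bytes().swap_bytes(), v);

if cfg!(target_endian = "little") {
    assert_eq!(v.to_le(), v);
    assert_eq!(v.to_be(), v.swap_bytes());
}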

+

impl isizex2[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl isizex2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl isizex2[src]

pub fn eq(self, other: Self) -> msizex2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex2[src]

Lane-wise greater-than-or-equals comparison.
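
Example

The comparisons are lane-wise and return a mask vector (msizex2) rather than a single bool; a minimal sketch:

use packed_simd::{isizex2, msizex2};

let a = isizex2::new(1, 5);
let b = isizex2::splat(2);
assert_eq!(a.gt(b), msizex2::new(false, true));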

+

impl isizex2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<isizex2>[src]

Returns a wrapper that implements PartialOrd.

+

impl isizex2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<isizex2>[src]

Returns a wrapper that implements Ord.

+

impl isizex2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.
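
Example

A sketch assuming the usual convention that lane 0 maps to the least-significant bit of the mask; for a signed integer vector the MSB is the sign bit, so negative lanes produce set bits:

use packed_simd::isizex2;

let v = isizex2::new(-1, 3);
assert_eq!(v.bitmask(), 0b01);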

+

Trait Implementations

impl Add<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<isize> for isizex2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[isize; 2]>> for isizex2[src]

impl AddAssign<isize> for isizex2[src]

impl Binary for isizex2[src]

impl BitAnd<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<isize> for isizex2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[isize; 2]>> for isizex2[src]

impl BitAndAssign<isize> for isizex2[src]

impl BitOr<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<isize> for isizex2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[isize; 2]>> for isizex2[src]

impl BitOrAssign<isize> for isizex2[src]

impl BitXor<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<isize> for isizex2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[isize; 2]>> for isizex2[src]

impl BitXorAssign<isize> for isizex2[src]

impl Debug for isizex2[src]

impl Default for isizex2[src]

impl Div<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<isize> for isizex2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[isize; 2]>> for isizex2[src]

impl DivAssign<isize> for isizex2[src]

impl Eq for isizex2[src]

impl From<[isize; 2]> for isizex2[src]

impl FromCast<Simd<[f32; 2]>> for isizex2[src]

impl FromCast<Simd<[f64; 2]>> for isizex2[src]

impl FromCast<Simd<[i128; 2]>> for isizex2[src]

impl FromCast<Simd<[i16; 2]>> for isizex2[src]

impl FromCast<Simd<[i32; 2]>> for isizex2[src]

impl FromCast<Simd<[i64; 2]>> for isizex2[src]

impl FromCast<Simd<[i8; 2]>> for isizex2[src]

impl FromCast<Simd<[m128; 2]>> for isizex2[src]

impl FromCast<Simd<[m16; 2]>> for isizex2[src]

impl FromCast<Simd<[m32; 2]>> for isizex2[src]

impl FromCast<Simd<[m64; 2]>> for isizex2[src]

impl FromCast<Simd<[m8; 2]>> for isizex2[src]

impl FromCast<Simd<[msize; 2]>> for isizex2[src]

impl FromCast<Simd<[u128; 2]>> for isizex2[src]

impl FromCast<Simd<[u16; 2]>> for isizex2[src]

impl FromCast<Simd<[u32; 2]>> for isizex2[src]

impl FromCast<Simd<[u64; 2]>> for isizex2[src]

impl FromCast<Simd<[u8; 2]>> for isizex2[src]

impl FromCast<Simd<[usize; 2]>> for isizex2[src]

impl Hash for isizex2[src]

impl LowerHex for isizex2[src]

impl Mul<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<isize> for isizex2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[isize; 2]>> for isizex2[src]

impl MulAssign<isize> for isizex2[src]

impl Neg for isizex2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for isizex2[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for isizex2[src]

impl PartialEq<Simd<[isize; 2]>> for isizex2[src]

impl<'a> Product<&'a Simd<[isize; 2]>> for isizex2[src]

impl Product<Simd<[isize; 2]>> for isizex2[src]

impl Rem<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<isize> for isizex2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[isize; 2]>> for isizex2[src]

impl RemAssign<isize> for isizex2[src]

impl Shl<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for isizex2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[isize; 2]>> for isizex2[src]

impl ShlAssign<u32> for isizex2[src]

impl Shr<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for isizex2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[isize; 2]>> for isizex2[src]

impl ShrAssign<u32> for isizex2[src]

impl Simd for isizex2[src]

type Element = isize

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+

impl Sub<Simd<[isize; 2]>> for isizex2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<isize> for isizex2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[isize; 2]>> for isizex2[src]

impl SubAssign<isize> for isizex2[src]

impl<'a> Sum<&'a Simd<[isize; 2]>> for isizex2[src]

impl Sum<Simd<[isize; 2]>> for isizex2[src]

impl UpperHex for isizex2[src]

\ No newline at end of file diff --git a/packed_simd/type.isizex4.html b/packed_simd/type.isizex4.html new file mode 100644 index 000000000..049542dab --- /dev/null +++ b/packed_simd/type.isizex4.html @@ -0,0 +1,231 @@ +packed_simd::isizex4 - Rust

[][src]Type Definition packed_simd::isizex4

type isizex4 = Simd<[isize; 4]>;

A vector with 4 isize lanes.

+

Implementations

impl isizex4[src]

pub const fn new(x0: isize, x1: isize, x2: isize, x3: isize) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: isize) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> isize[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> isize[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl isizex4[src]

pub fn rotate_left(self, n: isizex4) -> isizex4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: isizex4) -> isizex4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl isizex4[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl isizex4[src]

pub fn wrapping_sum(self) -> isize[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

pub fn wrapping_product(self) -> isize[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

impl isizex4[src]

pub fn max_element(self) -> isize[src]

Largest vector element value.

+

pub fn min_element(self) -> isize[src]

Smallest vector element value.

+

impl isizex4[src]

pub fn and(self) -> isize[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> isize[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> isize[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl isizex4[src]

pub fn from_slice_aligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl isizex4[src]

pub fn write_to_slice_aligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl isizex4[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl isizex4[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl isizex4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl isizex4[src]

pub fn eq(self, other: Self) -> msizex4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex4[src]

Lane-wise greater-than-or-equals comparison.

+

impl isizex4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<isizex4>[src]

Returns a wrapper that implements PartialOrd.

+

impl isizex4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<isizex4>[src]

Returns a wrapper that implements Ord.

+

impl isizex4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<isize> for isizex4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[isize; 4]>> for isizex4[src]

impl AddAssign<isize> for isizex4[src]

impl Binary for isizex4[src]

impl BitAnd<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<isize> for isizex4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[isize; 4]>> for isizex4[src]

impl BitAndAssign<isize> for isizex4[src]

impl BitOr<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<isize> for isizex4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[isize; 4]>> for isizex4[src]

impl BitOrAssign<isize> for isizex4[src]

impl BitXor<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<isize> for isizex4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[isize; 4]>> for isizex4[src]

impl BitXorAssign<isize> for isizex4[src]

impl Debug for isizex4[src]

impl Default for isizex4[src]

impl Div<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<isize> for isizex4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[isize; 4]>> for isizex4[src]

impl DivAssign<isize> for isizex4[src]

impl Eq for isizex4[src]

impl From<[isize; 4]> for isizex4[src]

impl FromCast<Simd<[f32; 4]>> for isizex4[src]

impl FromCast<Simd<[f64; 4]>> for isizex4[src]

impl FromCast<Simd<[i128; 4]>> for isizex4[src]

impl FromCast<Simd<[i16; 4]>> for isizex4[src]

impl FromCast<Simd<[i32; 4]>> for isizex4[src]

impl FromCast<Simd<[i64; 4]>> for isizex4[src]

impl FromCast<Simd<[i8; 4]>> for isizex4[src]

impl FromCast<Simd<[m128; 4]>> for isizex4[src]

impl FromCast<Simd<[m16; 4]>> for isizex4[src]

impl FromCast<Simd<[m32; 4]>> for isizex4[src]

impl FromCast<Simd<[m64; 4]>> for isizex4[src]

impl FromCast<Simd<[m8; 4]>> for isizex4[src]

impl FromCast<Simd<[msize; 4]>> for isizex4[src]

impl FromCast<Simd<[u128; 4]>> for isizex4[src]

impl FromCast<Simd<[u16; 4]>> for isizex4[src]

impl FromCast<Simd<[u32; 4]>> for isizex4[src]

impl FromCast<Simd<[u64; 4]>> for isizex4[src]

impl FromCast<Simd<[u8; 4]>> for isizex4[src]

impl FromCast<Simd<[usize; 4]>> for isizex4[src]

impl Hash for isizex4[src]

impl LowerHex for isizex4[src]

impl Mul<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<isize> for isizex4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[isize; 4]>> for isizex4[src]

impl MulAssign<isize> for isizex4[src]

impl Neg for isizex4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for isizex4[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for isizex4[src]

impl PartialEq<Simd<[isize; 4]>> for isizex4[src]

impl<'a> Product<&'a Simd<[isize; 4]>> for isizex4[src]

impl Product<Simd<[isize; 4]>> for isizex4[src]

impl Rem<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<isize> for isizex4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[isize; 4]>> for isizex4[src]

impl RemAssign<isize> for isizex4[src]

impl Shl<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for isizex4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[isize; 4]>> for isizex4[src]

impl ShlAssign<u32> for isizex4[src]

impl Shr<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for isizex4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[isize; 4]>> for isizex4[src]

impl ShrAssign<u32> for isizex4[src]

impl Simd for isizex4[src]

type Element = isize

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+

impl Sub<Simd<[isize; 4]>> for isizex4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<isize> for isizex4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[isize; 4]>> for isizex4[src]

impl SubAssign<isize> for isizex4[src]

impl<'a> Sum<&'a Simd<[isize; 4]>> for isizex4[src]

impl Sum<Simd<[isize; 4]>> for isizex4[src]

impl UpperHex for isizex4[src]

\ No newline at end of file diff --git a/packed_simd/type.isizex8.html b/packed_simd/type.isizex8.html new file mode 100644 index 000000000..7a6cf1504 --- /dev/null +++ b/packed_simd/type.isizex8.html @@ -0,0 +1,228 @@ +packed_simd::isizex8 - Rust

[][src]Type Definition packed_simd::isizex8

type isizex8 = Simd<[isize; 8]>;

A vector with 8 isize lanes.

+

Implementations

impl isizex8[src]

pub const fn new(
    x0: isize,
    x1: isize,
    x2: isize,
    x3: isize,
    x4: isize,
    x5: isize,
    x6: isize,
    x7: isize
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: isize) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> isize[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> isize[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: isize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl isizex8[src]

pub fn rotate_left(self, n: isizex8) -> isizex8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: isizex8) -> isizex8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl isizex8[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl isizex8[src]

pub fn wrapping_sum(self) -> isize[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

pub fn wrapping_product(self) -> isize[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, the result wraps around: it is the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

impl isizex8[src]

pub fn max_element(self) -> isize[src]

Largest vector element value.

+

pub fn min_element(self) -> isize[src]

Smallest vector element value.

+

impl isizex8[src]

pub fn and(self) -> isize[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> isize[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> isize[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl isizex8[src]

pub fn from_slice_aligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[isize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl isizex8[src]

pub fn write_to_slice_aligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [isize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl isizex8[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl isizex8[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl isizex8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl isizex8[src]

pub fn eq(self, other: Self) -> msizex8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex8[src]

Lane-wise greater-than-or-equals comparison.

+

impl isizex8[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<isizex8>[src]

Returns a wrapper that implements PartialOrd.

+

impl isizex8[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<isizex8>[src]

Returns a wrapper that implements Ord.

+

impl isizex8[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<isize> for isizex8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[isize; 8]>> for isizex8[src]

impl AddAssign<isize> for isizex8[src]

impl Binary for isizex8[src]

impl BitAnd<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<isize> for isizex8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[isize; 8]>> for isizex8[src]

impl BitAndAssign<isize> for isizex8[src]

impl BitOr<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<isize> for isizex8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[isize; 8]>> for isizex8[src]

impl BitOrAssign<isize> for isizex8[src]

impl BitXor<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<isize> for isizex8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[isize; 8]>> for isizex8[src]

impl BitXorAssign<isize> for isizex8[src]

impl Debug for isizex8[src]

impl Default for isizex8[src]

impl Div<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<isize> for isizex8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[isize; 8]>> for isizex8[src]

impl DivAssign<isize> for isizex8[src]

impl Eq for isizex8[src]

impl From<[isize; 8]> for isizex8[src]

impl FromCast<Simd<[f32; 8]>> for isizex8[src]

impl FromCast<Simd<[f64; 8]>> for isizex8[src]

impl FromCast<Simd<[i16; 8]>> for isizex8[src]

impl FromCast<Simd<[i32; 8]>> for isizex8[src]

impl FromCast<Simd<[i64; 8]>> for isizex8[src]

impl FromCast<Simd<[i8; 8]>> for isizex8[src]

impl FromCast<Simd<[m16; 8]>> for isizex8[src]

impl FromCast<Simd<[m32; 8]>> for isizex8[src]

impl FromCast<Simd<[m64; 8]>> for isizex8[src]

impl FromCast<Simd<[m8; 8]>> for isizex8[src]

impl FromCast<Simd<[msize; 8]>> for isizex8[src]

impl FromCast<Simd<[u16; 8]>> for isizex8[src]

impl FromCast<Simd<[u32; 8]>> for isizex8[src]

impl FromCast<Simd<[u64; 8]>> for isizex8[src]

impl FromCast<Simd<[u8; 8]>> for isizex8[src]

impl FromCast<Simd<[usize; 8]>> for isizex8[src]

impl Hash for isizex8[src]

impl LowerHex for isizex8[src]

impl Mul<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<isize> for isizex8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[isize; 8]>> for isizex8[src]

impl MulAssign<isize> for isizex8[src]

impl Neg for isizex8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Not for isizex8[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for isizex8[src]

impl PartialEq<Simd<[isize; 8]>> for isizex8[src]

impl<'a> Product<&'a Simd<[isize; 8]>> for isizex8[src]

impl Product<Simd<[isize; 8]>> for isizex8[src]

impl Rem<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<isize> for isizex8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[isize; 8]>> for isizex8[src]

impl RemAssign<isize> for isizex8[src]

impl Shl<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for isizex8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[isize; 8]>> for isizex8[src]

impl ShlAssign<u32> for isizex8[src]

impl Shr<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for isizex8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[isize; 8]>> for isizex8[src]

impl ShrAssign<u32> for isizex8[src]

impl Simd for isizex8[src]

type Element = isize

Element type of the SIMD vector

+

type LanesType = [u32; 8]

The type: [u32; Self::N].

+

impl Sub<Simd<[isize; 8]>> for isizex8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<isize> for isizex8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[isize; 8]>> for isizex8[src]

impl SubAssign<isize> for isizex8[src]

impl<'a> Sum<&'a Simd<[isize; 8]>> for isizex8[src]

impl Sum<Simd<[isize; 8]>> for isizex8[src]

impl UpperHex for isizex8[src]

\ No newline at end of file diff --git a/packed_simd/type.m128x1.html b/packed_simd/type.m128x1.html new file mode 100644 index 000000000..0f81f00ae --- /dev/null +++ b/packed_simd/type.m128x1.html @@ -0,0 +1,84 @@ +packed_simd::m128x1 - Rust

[][src]Type Definition packed_simd::m128x1

type m128x1 = Simd<[m128; 1]>;

A 128-bit vector mask with 1 m128 lane.

+

Implementations

impl m128x1[src]

pub const fn new(x0: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl m128x1[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl m128x1[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl m128x1[src]

pub fn eq(self, other: Self) -> m128x1[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m128x1[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m128x1[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m128x1[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m128x1[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m128x1[src]

Lane-wise greater-than-or-equals comparison.

+

impl m128x1[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m128; 1] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+

impl m128x1[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m128x1>[src]

Returns a wrapper that implements PartialOrd.

+

impl m128x1[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m128x1>[src]

Returns a wrapper that implements Ord.

+

impl m128x1[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl m128x1[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl BitAnd<Simd<[m128; 1]>> for m128x1[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m128x1[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m128; 1]>> for m128x1[src]

impl BitAndAssign<bool> for m128x1[src]

impl BitOr<Simd<[m128; 1]>> for m128x1[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m128x1[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m128; 1]>> for m128x1[src]

impl BitOrAssign<bool> for m128x1[src]

impl BitXor<Simd<[m128; 1]>> for m128x1[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m128x1[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m128; 1]>> for m128x1[src]

impl BitXorAssign<bool> for m128x1[src]

impl Debug for m128x1[src]

impl Default for m128x1[src]

impl Eq for m128x1[src]

impl From<[m128; 1]> for m128x1[src]

impl FromCast<Simd<[i128; 1]>> for m128x1[src]

impl FromCast<Simd<[u128; 1]>> for m128x1[src]

impl Not for m128x1[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m128; 1]>> for m128x1[src]

impl Simd for m128x1[src]

type Element = m128

Element type of the SIMD vector

+

type LanesType = [u32; 1]

The type: [u32; Self::N].

+
\ No newline at end of file diff --git a/packed_simd/type.m128x2.html b/packed_simd/type.m128x2.html new file mode 100644 index 000000000..1c619d494 --- /dev/null +++ b/packed_simd/type.m128x2.html @@ -0,0 +1,105 @@ +packed_simd::m128x2 - Rust

[][src]Type Definition packed_simd::m128x2

type m128x2 = Simd<[m128; 2]>;

A 256-bit vector mask with 2 m128 lanes.

+

Implementations

impl m128x2[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl m128x2[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl m128x2[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl m128x2[src]

pub fn eq(self, other: Self) -> m128x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m128x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m128x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m128x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m128x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m128x2[src]

Lane-wise greater-than-or-equals comparison.

+

impl m128x2[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m128; 2] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.
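
Example

A minimal sketch (pairing the mask with i128x2, which has the same lane count): true lanes take their value from a, false lanes from b:

use packed_simd::{i128x2, m128x2};

let m = m128x2::new(true, false);
let a = i128x2::new(1, 2);
let b = i128x2::new(-1, -2);
assert_eq!(m.select(a, b), i128x2::new(1, -2));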

+

impl m128x2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m128x2>[src]

Returns a wrapper that implements PartialOrd.

+

impl m128x2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m128x2>[src]

Returns a wrapper that implements Ord.

+

impl m128x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl m128x2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl BitAnd<Simd<[m128; 2]>> for m128x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m128x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m128; 2]>> for m128x2[src]

impl BitAndAssign<bool> for m128x2[src]

impl BitOr<Simd<[m128; 2]>> for m128x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m128x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m128; 2]>> for m128x2[src]

impl BitOrAssign<bool> for m128x2[src]

impl BitXor<Simd<[m128; 2]>> for m128x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m128x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m128; 2]>> for m128x2[src]

impl BitXorAssign<bool> for m128x2[src]

impl Debug for m128x2[src]

impl Default for m128x2[src]

impl Eq for m128x2[src]

impl From<[m128; 2]> for m128x2[src]

impl From<Simd<[m16; 2]>> for m128x2[src]

impl From<Simd<[m32; 2]>> for m128x2[src]

impl From<Simd<[m64; 2]>> for m128x2[src]

impl From<Simd<[m8; 2]>> for m128x2[src]

impl FromCast<Simd<[f32; 2]>> for m128x2[src]

impl FromCast<Simd<[f64; 2]>> for m128x2[src]

impl FromCast<Simd<[i128; 2]>> for m128x2[src]

impl FromCast<Simd<[i16; 2]>> for m128x2[src]

impl FromCast<Simd<[i32; 2]>> for m128x2[src]

impl FromCast<Simd<[i64; 2]>> for m128x2[src]

impl FromCast<Simd<[i8; 2]>> for m128x2[src]

impl FromCast<Simd<[isize; 2]>> for m128x2[src]

impl FromCast<Simd<[m16; 2]>> for m128x2[src]

impl FromCast<Simd<[m32; 2]>> for m128x2[src]

impl FromCast<Simd<[m64; 2]>> for m128x2[src]

impl FromCast<Simd<[m8; 2]>> for m128x2[src]

impl FromCast<Simd<[msize; 2]>> for m128x2[src]

impl FromCast<Simd<[u128; 2]>> for m128x2[src]

impl FromCast<Simd<[u16; 2]>> for m128x2[src]

impl FromCast<Simd<[u32; 2]>> for m128x2[src]

impl FromCast<Simd<[u64; 2]>> for m128x2[src]

impl FromCast<Simd<[u8; 2]>> for m128x2[src]

impl FromCast<Simd<[usize; 2]>> for m128x2[src]

impl Not for m128x2[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m128; 2]>> for m128x2[src]

impl Simd for m128x2[src]

type Element = m128

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+
\ No newline at end of file diff --git a/packed_simd/type.m128x4.html b/packed_simd/type.m128x4.html new file mode 100644 index 000000000..c8db7fcd6 --- /dev/null +++ b/packed_simd/type.m128x4.html @@ -0,0 +1,105 @@ +packed_simd::m128x4 - Rust

[][src]Type Definition packed_simd::m128x4

type m128x4 = Simd<[m128; 4]>;

A 512-bit vector mask with 4 m128 lanes.

+

Implementations

impl m128x4[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl m128x4[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl m128x4[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?
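
Example

A minimal sketch of the three reductions:

use packed_simd::m128x4;

let m = m128x4::new(true, false, true, false);
assert!(m.any());
assert!(!m.all());
assert!(!m.none());
assert!(m128x4::splat(false).none());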

+

impl m128x4[src]

pub fn eq(self, other: Self) -> m128x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m128x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m128x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m128x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m128x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m128x4[src]

Lane-wise greater-than-or-equals comparison.

+

impl m128x4[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m128; 4] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+

impl m128x4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m128x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl m128x4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m128x4>[src]

Returns a wrapper that implements Ord.

+

impl m128x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl m128x4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl BitAnd<Simd<[m128; 4]>> for m128x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m128x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m128; 4]>> for m128x4[src]

impl BitAndAssign<bool> for m128x4[src]

impl BitOr<Simd<[m128; 4]>> for m128x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m128x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m128; 4]>> for m128x4[src]

impl BitOrAssign<bool> for m128x4[src]

impl BitXor<Simd<[m128; 4]>> for m128x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m128x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m128; 4]>> for m128x4[src]

impl BitXorAssign<bool> for m128x4[src]

impl Debug for m128x4[src]

impl Default for m128x4[src]

impl Eq for m128x4[src]

impl From<[m128; 4]> for m128x4[src]

impl From<Simd<[m16; 4]>> for m128x4[src]

impl From<Simd<[m32; 4]>> for m128x4[src]

impl From<Simd<[m64; 4]>> for m128x4[src]

impl From<Simd<[m8; 4]>> for m128x4[src]

impl FromCast<Simd<[f32; 4]>> for m128x4[src]

impl FromCast<Simd<[f64; 4]>> for m128x4[src]

impl FromCast<Simd<[i128; 4]>> for m128x4[src]

impl FromCast<Simd<[i16; 4]>> for m128x4[src]

impl FromCast<Simd<[i32; 4]>> for m128x4[src]

impl FromCast<Simd<[i64; 4]>> for m128x4[src]

impl FromCast<Simd<[i8; 4]>> for m128x4[src]

impl FromCast<Simd<[isize; 4]>> for m128x4[src]

impl FromCast<Simd<[m16; 4]>> for m128x4[src]

impl FromCast<Simd<[m32; 4]>> for m128x4[src]

impl FromCast<Simd<[m64; 4]>> for m128x4[src]

impl FromCast<Simd<[m8; 4]>> for m128x4[src]

impl FromCast<Simd<[msize; 4]>> for m128x4[src]

impl FromCast<Simd<[u128; 4]>> for m128x4[src]

impl FromCast<Simd<[u16; 4]>> for m128x4[src]

impl FromCast<Simd<[u32; 4]>> for m128x4[src]

impl FromCast<Simd<[u64; 4]>> for m128x4[src]

impl FromCast<Simd<[u8; 4]>> for m128x4[src]

impl FromCast<Simd<[usize; 4]>> for m128x4[src]

impl Not for m128x4[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m128; 4]>> for m128x4[src]

impl Simd for m128x4[src]

type Element = m128

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+
\ No newline at end of file diff --git a/packed_simd/type.m16x16.html b/packed_simd/type.m16x16.html new file mode 100644 index 000000000..553e3cceb --- /dev/null +++ b/packed_simd/type.m16x16.html @@ -0,0 +1,95 @@ +packed_simd::m16x16 - Rust

[][src]Type Definition packed_simd::m16x16

type m16x16 = Simd<[m16; 16]>;

A 256-bit vector mask with 16 m16 lanes.

+

Implementations

impl m16x16[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

impl m16x16[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl m16x16[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl m16x16[src]

pub fn eq(self, other: Self) -> m16x16[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x16[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x16[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x16[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x16[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x16[src]

Lane-wise greater-than-or-equals comparison.

+

impl m16x16[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m16; 16] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+

impl m16x16[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m16x16>[src]

Returns a wrapper that implements PartialOrd.

+

impl m16x16[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m16x16>[src]

Returns a wrapper that implements Ord.

+

impl m16x16[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl m16x16[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+
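
A short bitmask sketch; mapping lane 0 to the least-significant bit is an assumption, since the page does not spell out lane order:

use packed_simd::m16x16;

fn bitmask_sketch() {
    // One bit per lane, packed into a u16 for this 16-lane mask.
    let m = m16x16::splat(false).replace(0, true).replace(15, true);
    let bits: u16 = m.bitmask();
    assert_eq!(bits, 0b1000_0000_0000_0001); // assumes lane 0 -> bit 0
}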

Trait Implementations

impl BitAnd<Simd<[m16; 16]>> for m16x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m16x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m16; 16]>> for m16x16[src]

impl BitAndAssign<bool> for m16x16[src]

impl BitOr<Simd<[m16; 16]>> for m16x16[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m16x16[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m16; 16]>> for m16x16[src]

impl BitOrAssign<bool> for m16x16[src]

impl BitXor<Simd<[m16; 16]>> for m16x16[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m16x16[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m16; 16]>> for m16x16[src]

impl BitXorAssign<bool> for m16x16[src]

impl Debug for m16x16[src]

impl Default for m16x16[src]

impl Eq for m16x16[src]

impl From<[m16; 16]> for m16x16[src]

impl From<Simd<[m8; 16]>> for m16x16[src]

impl FromBits<Simd<[m128; 2]>> for m16x16[src]

impl FromBits<Simd<[m32; 8]>> for m16x16[src]

impl FromBits<Simd<[m64; 4]>> for m16x16[src]

impl FromCast<Simd<[f32; 16]>> for m16x16[src]

impl FromCast<Simd<[i16; 16]>> for m16x16[src]

impl FromCast<Simd<[i32; 16]>> for m16x16[src]

impl FromCast<Simd<[i8; 16]>> for m16x16[src]

impl FromCast<Simd<[m32; 16]>> for m16x16[src]

impl FromCast<Simd<[m8; 16]>> for m16x16[src]

impl FromCast<Simd<[u16; 16]>> for m16x16[src]

impl FromCast<Simd<[u32; 16]>> for m16x16[src]

impl FromCast<Simd<[u8; 16]>> for m16x16[src]

impl Not for m16x16[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m16; 16]>> for m16x16[src]

impl Simd for m16x16[src]

type Element = m16

Element type of the SIMD vector

+

type LanesType = [u32; 16]

The type: [u32; Self::N].

+
\ No newline at end of file
diff --git a/packed_simd/type.m16x2.html b/packed_simd/type.m16x2.html
new file mode 100644
index 000000000..184b92060
--- /dev/null
+++ b/packed_simd/type.m16x2.html
@@ -0,0 +1,105 @@
+packed_simd::m16x2 - Rust

Type Definition packed_simd::m16x2

type m16x2 = Simd<[m16; 2]>;

A 32-bit vector mask with 2 m16 lanes.

+

Implementations

impl m16x2[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+
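
A minimal sketch of the extract/replace semantics described above (editorial, not part of the generated page):

use packed_simd::m16x2;

fn replace_sketch() {
    let m = m16x2::splat(false);
    // `replace` is pure: it returns a new vector and leaves `m` untouched,
    // which is exactly what the `must_use` attributes above warn about.
    let m2 = m.replace(1, true);
    assert_eq!(m.extract(1), false);
    assert_eq!(m2.extract(1), true);
}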

impl m16x2[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl m16x2[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+
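
A sketch of the three predicates (editorial addition):

use packed_simd::m16x2;

fn predicate_sketch() {
    let m = m16x2::new(true, false);
    assert!(m.any());   // at least one lane is true
    assert!(!m.all());  // not every lane is true
    assert!(!m.none()); // `none()` behaves as the negation of `any()`
}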

impl m16x2[src]

pub fn eq(self, other: Self) -> m16x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x2[src]

Lane-wise greater-than-or-equals comparison.

+

impl m16x2[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m16; 2] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+

impl m16x2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m16x2>[src]

Returns a wrapper that implements PartialOrd.

+

impl m16x2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m16x2>[src]

Returns a wrapper that implements Ord.

+

impl m16x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl m16x2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

Trait Implementations

impl BitAnd<Simd<[m16; 2]>> for m16x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m16x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m16; 2]>> for m16x2[src]

impl BitAndAssign<bool> for m16x2[src]

impl BitOr<Simd<[m16; 2]>> for m16x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m16x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m16; 2]>> for m16x2[src]

impl BitOrAssign<bool> for m16x2[src]

impl BitXor<Simd<[m16; 2]>> for m16x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m16x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m16; 2]>> for m16x2[src]

impl BitXorAssign<bool> for m16x2[src]

impl Debug for m16x2[src]

impl Default for m16x2[src]

impl Eq for m16x2[src]

impl From<[m16; 2]> for m16x2[src]

impl From<Simd<[m128; 2]>> for m16x2[src]

impl From<Simd<[m32; 2]>> for m16x2[src]

impl From<Simd<[m64; 2]>> for m16x2[src]

impl From<Simd<[m8; 2]>> for m16x2[src]

impl FromCast<Simd<[f32; 2]>> for m16x2[src]

impl FromCast<Simd<[f64; 2]>> for m16x2[src]

impl FromCast<Simd<[i128; 2]>> for m16x2[src]

impl FromCast<Simd<[i16; 2]>> for m16x2[src]

impl FromCast<Simd<[i32; 2]>> for m16x2[src]

impl FromCast<Simd<[i64; 2]>> for m16x2[src]

impl FromCast<Simd<[i8; 2]>> for m16x2[src]

impl FromCast<Simd<[isize; 2]>> for m16x2[src]

impl FromCast<Simd<[m128; 2]>> for m16x2[src]

impl FromCast<Simd<[m32; 2]>> for m16x2[src]

impl FromCast<Simd<[m64; 2]>> for m16x2[src]

impl FromCast<Simd<[m8; 2]>> for m16x2[src]

impl FromCast<Simd<[msize; 2]>> for m16x2[src]

impl FromCast<Simd<[u128; 2]>> for m16x2[src]

impl FromCast<Simd<[u16; 2]>> for m16x2[src]

impl FromCast<Simd<[u32; 2]>> for m16x2[src]

impl FromCast<Simd<[u64; 2]>> for m16x2[src]

impl FromCast<Simd<[u8; 2]>> for m16x2[src]

impl FromCast<Simd<[usize; 2]>> for m16x2[src]

impl Not for m16x2[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m16; 2]>> for m16x2[src]

impl Simd for m16x2[src]

type Element = m16

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+
\ No newline at end of file
diff --git a/packed_simd/type.m16x32.html b/packed_simd/type.m16x32.html
new file mode 100644
index 000000000..f8d8ffad9
--- /dev/null
+++ b/packed_simd/type.m16x32.html
@@ -0,0 +1,91 @@
+packed_simd::m16x32 - Rust

Type Definition packed_simd::m16x32

type m16x32 = Simd<[m16; 32]>;

A 512-bit vector mask with 32 m16 lanes.

+

Implementations

impl m16x32[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool,
    x16: bool,
    x17: bool,
    x18: bool,
    x19: bool,
    x20: bool,
    x21: bool,
    x22: bool,
    x23: bool,
    x24: bool,
    x25: bool,
    x26: bool,
    x27: bool,
    x28: bool,
    x29: bool,
    x30: bool,
    x31: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

impl m16x32[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+
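
A sketch of the three reductions (editorial addition; `replace` is documented earlier on this page):

use packed_simd::m16x32;

fn reduction_sketch() {
    let m = m16x32::splat(true).replace(7, false);
    assert_eq!(m.and(), false); // a single false lane makes the AND false
    assert_eq!(m.or(), true);   // any true lane makes the OR true
    assert_eq!(m.xor(), true);  // 31 true lanes: an odd count XORs to true
}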

impl m16x32[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl m16x32[src]

pub fn eq(self, other: Self) -> m16x32[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x32[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x32[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x32[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x32[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x32[src]

Lane-wise greater-than-or-equals comparison.

+

impl m16x32[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m16; 32] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+

impl m16x32[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m16x32>[src]

Returns a wrapper that implements PartialOrd.

+

impl m16x32[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m16x32>[src]

Returns a wrapper that implements Ord.

+

impl m16x32[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl m16x32[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

Trait Implementations

impl BitAnd<Simd<[m16; 32]>> for m16x32[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m16x32[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m16; 32]>> for m16x32[src]

impl BitAndAssign<bool> for m16x32[src]

impl BitOr<Simd<[m16; 32]>> for m16x32[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m16x32[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m16; 32]>> for m16x32[src]

impl BitOrAssign<bool> for m16x32[src]

impl BitXor<Simd<[m16; 32]>> for m16x32[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m16x32[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m16; 32]>> for m16x32[src]

impl BitXorAssign<bool> for m16x32[src]

impl Debug for m16x32[src]

impl Default for m16x32[src]

impl Eq for m16x32[src]

impl From<[m16; 32]> for m16x32[src]

impl From<Simd<[m8; 32]>> for m16x32[src]

impl FromBits<Simd<[m128; 4]>> for m16x32[src]

impl FromBits<Simd<[m32; 16]>> for m16x32[src]

impl FromBits<Simd<[m64; 8]>> for m16x32[src]

impl FromCast<Simd<[i16; 32]>> for m16x32[src]

impl FromCast<Simd<[i8; 32]>> for m16x32[src]

impl FromCast<Simd<[m8; 32]>> for m16x32[src]

impl FromCast<Simd<[u16; 32]>> for m16x32[src]

impl FromCast<Simd<[u8; 32]>> for m16x32[src]

impl Not for m16x32[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m16; 32]>> for m16x32[src]

impl Simd for m16x32[src]

type Element = m16

Element type of the SIMD vector

+

type LanesType = [u32; 32]

The type: [u32; Self::N].

+
\ No newline at end of file
diff --git a/packed_simd/type.m16x4.html b/packed_simd/type.m16x4.html
new file mode 100644
index 000000000..c907dcc54
--- /dev/null
+++ b/packed_simd/type.m16x4.html
@@ -0,0 +1,105 @@
+packed_simd::m16x4 - Rust

Type Definition packed_simd::m16x4

type m16x4 = Simd<[m16; 4]>;

A 64-bit vector mask with 4 m16 lanes.

+

Implementations

impl m16x4[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

impl m16x4[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl m16x4[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl m16x4[src]

pub fn eq(self, other: Self) -> m16x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x4[src]

Lane-wise greater-than-or-equals comparison.

+
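
A sketch of the lane-wise comparisons (editorial; the `false < true` per-lane ordering is an assumption):

use packed_simd::m16x4;

fn comparison_sketch() {
    let a = m16x4::new(true, false, true, false);
    let b = m16x4::new(true, true, false, false);
    // Each comparison is lane-wise and yields a new mask.
    assert_eq!(a.eq(b), m16x4::new(true, false, false, true));
    assert_eq!(a.lt(b), m16x4::new(false, true, false, false));
}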

impl m16x4[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m16; 4] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+

impl m16x4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m16x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl m16x4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m16x4>[src]

Returns a wrapper that implements Ord.

+

impl m16x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl m16x4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

Trait Implementations

impl BitAnd<Simd<[m16; 4]>> for m16x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m16x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m16; 4]>> for m16x4[src]

impl BitAndAssign<bool> for m16x4[src]

impl BitOr<Simd<[m16; 4]>> for m16x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m16x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m16; 4]>> for m16x4[src]

impl BitOrAssign<bool> for m16x4[src]

impl BitXor<Simd<[m16; 4]>> for m16x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m16x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m16; 4]>> for m16x4[src]

impl BitXorAssign<bool> for m16x4[src]

impl Debug for m16x4[src]

impl Default for m16x4[src]

impl Eq for m16x4[src]

impl From<[m16; 4]> for m16x4[src]

impl From<Simd<[m32; 4]>> for m16x4[src]

impl From<Simd<[m64; 4]>> for m16x4[src]

impl From<Simd<[m8; 4]>> for m16x4[src]

impl FromBits<Simd<[m32; 2]>> for m16x4[src]

impl FromCast<Simd<[f32; 4]>> for m16x4[src]

impl FromCast<Simd<[f64; 4]>> for m16x4[src]

impl FromCast<Simd<[i128; 4]>> for m16x4[src]

impl FromCast<Simd<[i16; 4]>> for m16x4[src]

impl FromCast<Simd<[i32; 4]>> for m16x4[src]

impl FromCast<Simd<[i64; 4]>> for m16x4[src]

impl FromCast<Simd<[i8; 4]>> for m16x4[src]

impl FromCast<Simd<[isize; 4]>> for m16x4[src]

impl FromCast<Simd<[m128; 4]>> for m16x4[src]

impl FromCast<Simd<[m32; 4]>> for m16x4[src]

impl FromCast<Simd<[m64; 4]>> for m16x4[src]

impl FromCast<Simd<[m8; 4]>> for m16x4[src]

impl FromCast<Simd<[msize; 4]>> for m16x4[src]

impl FromCast<Simd<[u128; 4]>> for m16x4[src]

impl FromCast<Simd<[u16; 4]>> for m16x4[src]

impl FromCast<Simd<[u32; 4]>> for m16x4[src]

impl FromCast<Simd<[u64; 4]>> for m16x4[src]

impl FromCast<Simd<[u8; 4]>> for m16x4[src]

impl FromCast<Simd<[usize; 4]>> for m16x4[src]

impl Not for m16x4[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m16; 4]>> for m16x4[src]

impl Simd for m16x4[src]

type Element = m16

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+
\ No newline at end of file
diff --git a/packed_simd/type.m16x8.html b/packed_simd/type.m16x8.html
new file mode 100644
index 000000000..8972628b1
--- /dev/null
+++ b/packed_simd/type.m16x8.html
@@ -0,0 +1,103 @@
+packed_simd::m16x8 - Rust

Type Definition packed_simd::m16x8

type m16x8 = Simd<[m16; 8]>;

A 128-bit vector mask with 8 m16 lanes.

+

Implementations

impl m16x8[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

impl m16x8[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl m16x8[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl m16x8[src]

pub fn eq(self, other: Self) -> m16x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl m16x8[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m16; 8] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+

impl m16x8[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m16x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl m16x8[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m16x8>[src]

Returns a wrapper that implements Ord.

+

impl m16x8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl m16x8[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

Trait Implementations

impl BitAnd<Simd<[m16; 8]>> for m16x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m16x8[src]

type Output = Self

The resulting type after applying the & operator.

+
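
A sketch of the operator impls in this section (editorial addition):

use packed_simd::m16x8;

fn operator_sketch() {
    let a = m16x8::new(true, true, false, false, true, true, false, false);
    let b = m16x8::new(true, false, true, false, true, false, true, false);
    // Lane-wise boolean algebra through the standard operator traits.
    assert_eq!((a & b).extract(0), true);
    assert_eq!((a | b).extract(3), false);
    assert_eq!((!a).extract(2), true);
    // The `bool` impls broadcast a scalar to every lane.
    assert_eq!(a & true, a);
}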

impl BitAndAssign<Simd<[m16; 8]>> for m16x8[src]

impl BitAndAssign<bool> for m16x8[src]

impl BitOr<Simd<[m16; 8]>> for m16x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m16x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m16; 8]>> for m16x8[src]

impl BitOrAssign<bool> for m16x8[src]

impl BitXor<Simd<[m16; 8]>> for m16x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m16x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m16; 8]>> for m16x8[src]

impl BitXorAssign<bool> for m16x8[src]

impl Debug for m16x8[src]

impl Default for m16x8[src]

impl Eq for m16x8[src]

impl From<[m16; 8]> for m16x8[src]

impl From<Simd<[m32; 8]>> for m16x8[src]

impl From<Simd<[m8; 8]>> for m16x8[src]

impl FromBits<Simd<[m128; 1]>> for m16x8[src]

impl FromBits<Simd<[m32; 4]>> for m16x8[src]

impl FromBits<Simd<[m64; 2]>> for m16x8[src]

impl FromCast<Simd<[f32; 8]>> for m16x8[src]

impl FromCast<Simd<[f64; 8]>> for m16x8[src]

impl FromCast<Simd<[i16; 8]>> for m16x8[src]

impl FromCast<Simd<[i32; 8]>> for m16x8[src]

impl FromCast<Simd<[i64; 8]>> for m16x8[src]

impl FromCast<Simd<[i8; 8]>> for m16x8[src]

impl FromCast<Simd<[isize; 8]>> for m16x8[src]

impl FromCast<Simd<[m32; 8]>> for m16x8[src]

impl FromCast<Simd<[m64; 8]>> for m16x8[src]

impl FromCast<Simd<[m8; 8]>> for m16x8[src]

impl FromCast<Simd<[msize; 8]>> for m16x8[src]

impl FromCast<Simd<[u16; 8]>> for m16x8[src]

impl FromCast<Simd<[u32; 8]>> for m16x8[src]

impl FromCast<Simd<[u64; 8]>> for m16x8[src]

impl FromCast<Simd<[u8; 8]>> for m16x8[src]

impl FromCast<Simd<[usize; 8]>> for m16x8[src]

impl Not for m16x8[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m16; 8]>> for m16x8[src]

impl Simd for m16x8[src]

type Element = m16

Element type of the SIMD vector

+

type LanesType = [u32; 8]

The type: [u32; Self::N].

+
\ No newline at end of file
diff --git a/packed_simd/type.m32x16.html b/packed_simd/type.m32x16.html
new file mode 100644
index 000000000..8284e171b
--- /dev/null
+++ b/packed_simd/type.m32x16.html
@@ -0,0 +1,95 @@
+packed_simd::m32x16 - Rust

Type Definition packed_simd::m32x16

type m32x16 = Simd<[m32; 16]>;

A 512-bit vector mask with 16 m32 lanes.

+

Implementations

impl m32x16[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

impl m32x16[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl m32x16[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl m32x16[src]

pub fn eq(self, other: Self) -> m32x16[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x16[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x16[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x16[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x16[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x16[src]

Lane-wise greater-than-or-equals comparison.

+

impl m32x16[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m32; 16] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+

impl m32x16[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m32x16>[src]

Returns a wrapper that implements PartialOrd.

+

impl m32x16[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m32x16>[src]

Returns a wrapper that implements Ord.

+
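
A sketch of the ordering wrappers (editorial; lexicographic comparison starting at lane 0 with `false < true` is an assumption):

use core::cmp::Ordering;
use packed_simd::m32x16;

fn lex_ord_sketch() {
    let a = m32x16::splat(false);
    let b = m32x16::splat(false).replace(0, true);
    // `lex_ord()` yields a wrapper with a total order (`Ord`);
    // `partial_lex_ord()` yields one with `PartialOrd`.
    assert_eq!(a.lex_ord().cmp(&b.lex_ord()), Ordering::Less);
    assert!(a.partial_lex_ord() < b.partial_lex_ord());
}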

impl m32x16[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl m32x16[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

Trait Implementations

impl BitAnd<Simd<[m32; 16]>> for m32x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m32x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m32; 16]>> for m32x16[src]

impl BitAndAssign<bool> for m32x16[src]

impl BitOr<Simd<[m32; 16]>> for m32x16[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m32x16[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m32; 16]>> for m32x16[src]

impl BitOrAssign<bool> for m32x16[src]

impl BitXor<Simd<[m32; 16]>> for m32x16[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m32x16[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m32; 16]>> for m32x16[src]

impl BitXorAssign<bool> for m32x16[src]

impl Debug for m32x16[src]

impl Default for m32x16[src]

impl Eq for m32x16[src]

impl From<[m32; 16]> for m32x16[src]

impl From<Simd<[m16; 16]>> for m32x16[src]

impl From<Simd<[m8; 16]>> for m32x16[src]

impl FromBits<Simd<[m128; 4]>> for m32x16[src]

impl FromBits<Simd<[m64; 8]>> for m32x16[src]

impl FromCast<Simd<[f32; 16]>> for m32x16[src]

impl FromCast<Simd<[i16; 16]>> for m32x16[src]

impl FromCast<Simd<[i32; 16]>> for m32x16[src]

impl FromCast<Simd<[i8; 16]>> for m32x16[src]

impl FromCast<Simd<[m16; 16]>> for m32x16[src]

impl FromCast<Simd<[m8; 16]>> for m32x16[src]

impl FromCast<Simd<[u16; 16]>> for m32x16[src]

impl FromCast<Simd<[u32; 16]>> for m32x16[src]

impl FromCast<Simd<[u8; 16]>> for m32x16[src]

impl Not for m32x16[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m32; 16]>> for m32x16[src]

impl Simd for m32x16[src]

type Element = m32

Element type of the SIMD vector

+

type LanesType = [u32; 16]

The type: [u32; Self::N].

+
\ No newline at end of file
diff --git a/packed_simd/type.m32x2.html b/packed_simd/type.m32x2.html
new file mode 100644
index 000000000..026f6abf7
--- /dev/null
+++ b/packed_simd/type.m32x2.html
@@ -0,0 +1,105 @@
+packed_simd::m32x2 - Rust

Type Definition packed_simd::m32x2

type m32x2 = Simd<[m32; 2]>;

A 64-bit vector mask with 2 m32 lanes.

+

Implementations

impl m32x2[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

impl m32x2[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl m32x2[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl m32x2[src]

pub fn eq(self, other: Self) -> m32x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x2[src]

Lane-wise greater-than-or-equals comparison.

+

impl m32x2[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m32; 2] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+

impl m32x2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m32x2>[src]

Returns a wrapper that implements PartialOrd.

+

impl m32x2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m32x2>[src]

Returns a wrapper that implements Ord.

+

impl m32x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl m32x2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

Trait Implementations

impl BitAnd<Simd<[m32; 2]>> for m32x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m32x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m32; 2]>> for m32x2[src]

impl BitAndAssign<bool> for m32x2[src]

impl BitOr<Simd<[m32; 2]>> for m32x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m32x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m32; 2]>> for m32x2[src]

impl BitOrAssign<bool> for m32x2[src]

impl BitXor<Simd<[m32; 2]>> for m32x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m32x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m32; 2]>> for m32x2[src]

impl BitXorAssign<bool> for m32x2[src]

impl Debug for m32x2[src]

impl Default for m32x2[src]

impl Eq for m32x2[src]

impl From<[m32; 2]> for m32x2[src]

impl From<Simd<[m128; 2]>> for m32x2[src]

impl From<Simd<[m16; 2]>> for m32x2[src]

impl From<Simd<[m64; 2]>> for m32x2[src]

impl From<Simd<[m8; 2]>> for m32x2[src]

impl FromCast<Simd<[f32; 2]>> for m32x2[src]

impl FromCast<Simd<[f64; 2]>> for m32x2[src]

impl FromCast<Simd<[i128; 2]>> for m32x2[src]

impl FromCast<Simd<[i16; 2]>> for m32x2[src]

impl FromCast<Simd<[i32; 2]>> for m32x2[src]

impl FromCast<Simd<[i64; 2]>> for m32x2[src]

impl FromCast<Simd<[i8; 2]>> for m32x2[src]

impl FromCast<Simd<[isize; 2]>> for m32x2[src]

impl FromCast<Simd<[m128; 2]>> for m32x2[src]

impl FromCast<Simd<[m16; 2]>> for m32x2[src]

impl FromCast<Simd<[m64; 2]>> for m32x2[src]

impl FromCast<Simd<[m8; 2]>> for m32x2[src]

impl FromCast<Simd<[msize; 2]>> for m32x2[src]

impl FromCast<Simd<[u128; 2]>> for m32x2[src]

impl FromCast<Simd<[u16; 2]>> for m32x2[src]

impl FromCast<Simd<[u32; 2]>> for m32x2[src]

impl FromCast<Simd<[u64; 2]>> for m32x2[src]

impl FromCast<Simd<[u8; 2]>> for m32x2[src]

impl FromCast<Simd<[usize; 2]>> for m32x2[src]

impl Not for m32x2[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m32; 2]>> for m32x2[src]

impl Simd for m32x2[src]

type Element = m32

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+
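
A conversion sketch for the From/FromCast impls listed above (editorial; driving `FromCast` through the crate's `Cast` trait is an assumption):

use packed_simd::{m32x2, m64x2, Cast};

fn conversion_sketch() {
    let wide = m64x2::splat(true);
    // `From` covers the lane-wise mask-width conversions listed above.
    let narrow = m32x2::from(wide);
    // `FromCast` is normally used through `Cast::cast` (assumed here).
    let cast: m32x2 = wide.cast();
    assert_eq!(narrow, cast);
}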
\ No newline at end of file
diff --git a/packed_simd/type.m32x4.html b/packed_simd/type.m32x4.html
new file mode 100644
index 000000000..fe1e8ec81
--- /dev/null
+++ b/packed_simd/type.m32x4.html
@@ -0,0 +1,106 @@
+packed_simd::m32x4 - Rust

Type Definition packed_simd::m32x4

type m32x4 = Simd<[m32; 4]>;

A 128-bit vector mask with 4 m32 lanes.

+

Implementations

impl m32x4[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

impl m32x4[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl m32x4[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl m32x4[src]

pub fn eq(self, other: Self) -> m32x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x4[src]

Lane-wise greater-than-or-equals comparison.

+

impl m32x4[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m32; 4] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+

impl m32x4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m32x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl m32x4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m32x4>[src]

Returns a wrapper that implements Ord.

+

impl m32x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+
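
A shuffle sketch (editorial; using `u32x4` as the index vector is an assumption based on the `Shuffle1Dyn<Indices = I>` bound above):

use packed_simd::{m32x4, u32x4};

fn shuffle_sketch(m: m32x4) -> m32x4 {
    // Reverses the four lanes.
    m.shuffle1_dyn(u32x4::new(3, 2, 1, 0))
}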

impl m32x4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

Trait Implementations

impl BitAnd<Simd<[m32; 4]>> for m32x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m32x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m32; 4]>> for m32x4[src]

impl BitAndAssign<bool> for m32x4[src]

impl BitOr<Simd<[m32; 4]>> for m32x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m32x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m32; 4]>> for m32x4[src]

impl BitOrAssign<bool> for m32x4[src]

impl BitXor<Simd<[m32; 4]>> for m32x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m32x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m32; 4]>> for m32x4[src]

impl BitXorAssign<bool> for m32x4[src]

impl Debug for m32x4[src]

impl Default for m32x4[src]

impl Eq for m32x4[src]

impl From<[m32; 4]> for m32x4[src]

impl From<Simd<[m16; 4]>> for m32x4[src]

impl From<Simd<[m64; 4]>> for m32x4[src]

impl From<Simd<[m8; 4]>> for m32x4[src]

impl FromBits<Simd<[m128; 1]>> for m32x4[src]

impl FromBits<Simd<[m64; 2]>> for m32x4[src]

impl FromCast<Simd<[f32; 4]>> for m32x4[src]

impl FromCast<Simd<[f64; 4]>> for m32x4[src]

impl FromCast<Simd<[i128; 4]>> for m32x4[src]

impl FromCast<Simd<[i16; 4]>> for m32x4[src]

impl FromCast<Simd<[i32; 4]>> for m32x4[src]

impl FromCast<Simd<[i64; 4]>> for m32x4[src]

impl FromCast<Simd<[i8; 4]>> for m32x4[src]

impl FromCast<Simd<[isize; 4]>> for m32x4[src]

impl FromCast<Simd<[m128; 4]>> for m32x4[src]

impl FromCast<Simd<[m16; 4]>> for m32x4[src]

impl FromCast<Simd<[m64; 4]>> for m32x4[src]

impl FromCast<Simd<[m8; 4]>> for m32x4[src]

impl FromCast<Simd<[msize; 4]>> for m32x4[src]

impl FromCast<Simd<[u128; 4]>> for m32x4[src]

impl FromCast<Simd<[u16; 4]>> for m32x4[src]

impl FromCast<Simd<[u32; 4]>> for m32x4[src]

impl FromCast<Simd<[u64; 4]>> for m32x4[src]

impl FromCast<Simd<[u8; 4]>> for m32x4[src]

impl FromCast<Simd<[usize; 4]>> for m32x4[src]

impl Not for m32x4[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m32; 4]>> for m32x4[src]

impl Simd for m32x4[src]

type Element = m32

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+
\ No newline at end of file
diff --git a/packed_simd/type.m32x8.html b/packed_simd/type.m32x8.html
new file mode 100644
index 000000000..0792cc1b0
--- /dev/null
+++ b/packed_simd/type.m32x8.html
@@ -0,0 +1,102 @@
+packed_simd::m32x8 - Rust

Type Definition packed_simd::m32x8

type m32x8 = Simd<[m32; 8]>;

A 256-bit vector mask with 8 m32 lanes.

+

Implementations

impl m32x8[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

impl m32x8[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl m32x8[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl m32x8[src]

pub fn eq(self, other: Self) -> m32x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl m32x8[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m32; 8] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

+

impl m32x8[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m32x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl m32x8[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m32x8>[src]

Returns a wrapper that implements Ord.

+

impl m32x8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl m32x8[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

Trait Implementations

impl BitAnd<Simd<[m32; 8]>> for m32x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m32x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m32; 8]>> for m32x8[src]

impl BitAndAssign<bool> for m32x8[src]

impl BitOr<Simd<[m32; 8]>> for m32x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m32x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m32; 8]>> for m32x8[src]

impl BitOrAssign<bool> for m32x8[src]

impl BitXor<Simd<[m32; 8]>> for m32x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m32x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m32; 8]>> for m32x8[src]

impl BitXorAssign<bool> for m32x8[src]

impl Debug for m32x8[src]

impl Default for m32x8[src]

impl Eq for m32x8[src]

impl From<[m32; 8]> for m32x8[src]

impl From<Simd<[m16; 8]>> for m32x8[src]

impl From<Simd<[m8; 8]>> for m32x8[src]

impl FromBits<Simd<[m128; 2]>> for m32x8[src]

impl FromBits<Simd<[m64; 4]>> for m32x8[src]

impl FromCast<Simd<[f32; 8]>> for m32x8[src]

impl FromCast<Simd<[f64; 8]>> for m32x8[src]

impl FromCast<Simd<[i16; 8]>> for m32x8[src]

impl FromCast<Simd<[i32; 8]>> for m32x8[src]

impl FromCast<Simd<[i64; 8]>> for m32x8[src]

impl FromCast<Simd<[i8; 8]>> for m32x8[src]

impl FromCast<Simd<[isize; 8]>> for m32x8[src]

impl FromCast<Simd<[m16; 8]>> for m32x8[src]

impl FromCast<Simd<[m64; 8]>> for m32x8[src]

impl FromCast<Simd<[m8; 8]>> for m32x8[src]

impl FromCast<Simd<[msize; 8]>> for m32x8[src]

impl FromCast<Simd<[u16; 8]>> for m32x8[src]

impl FromCast<Simd<[u32; 8]>> for m32x8[src]

impl FromCast<Simd<[u64; 8]>> for m32x8[src]

impl FromCast<Simd<[u8; 8]>> for m32x8[src]

impl FromCast<Simd<[usize; 8]>> for m32x8[src]

impl Not for m32x8[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m32; 8]>> for m32x8[src]

impl Simd for m32x8[src]

type Element = m32

Element type of the SIMD vector

+

type LanesType = [u32; 8]

The type: [u32; Self::N].

+
\ No newline at end of file
diff --git a/packed_simd/type.m64x2.html b/packed_simd/type.m64x2.html
new file mode 100644
index 000000000..130f8b321
--- /dev/null
+++ b/packed_simd/type.m64x2.html
@@ -0,0 +1,106 @@
+packed_simd::m64x2 - Rust

Type Definition packed_simd::m64x2

type m64x2 = Simd<[m64; 2]>;

A 128-bit vector mask with 2 m64 lanes.

+

Implementations

impl m64x2[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

impl m64x2[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl m64x2[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+
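
Example

A sketch of the horizontal reductions on a two-lane mask:

    use packed_simd::*;

    fn main() {
        let m = m64x2::new(true, false);
        assert_eq!(m.and(), false); // not every lane is true
        assert_eq!(m.or(), true);   // at least one lane is true
        assert_eq!(m.xor(), true);  // an odd number of lanes are true
        assert!(m.any());
        assert!(!m.all());
        assert!(!m.none());
    }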

impl m64x2[src]

pub fn eq(self, other: Self) -> m64x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x2[src]

Lane-wise greater-than-or-equals comparison.

impl m64x2[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m64; 2] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl m64x2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m64x2>[src]

Returns a wrapper that implements PartialOrd.

impl m64x2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m64x2>[src]

Returns a wrapper that implements Ord.

impl m64x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl m64x2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.
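
Example

A sketch of using a mask to blend two f64x2 vectors and to pack the lanes into an integer (this assumes bitmask stores lane i in bit i):

    use packed_simd::*;

    fn main() {
        let mask = m64x2::new(true, false);
        let a = f64x2::new(1.0, 2.0);
        let b = f64x2::new(10.0, 20.0);
        // Lane 0 is true -> taken from `a`; lane 1 is false -> from `b`.
        assert_eq!(mask.select(a, b), f64x2::new(1.0, 20.0));
        assert_eq!(mask.bitmask(), 0b01);
    }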

Trait Implementations

impl BitAnd<Simd<[m64; 2]>> for m64x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m64x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m64; 2]>> for m64x2[src]

impl BitAndAssign<bool> for m64x2[src]

impl BitOr<Simd<[m64; 2]>> for m64x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m64x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m64; 2]>> for m64x2[src]

impl BitOrAssign<bool> for m64x2[src]

impl BitXor<Simd<[m64; 2]>> for m64x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m64x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m64; 2]>> for m64x2[src]

impl BitXorAssign<bool> for m64x2[src]

impl Debug for m64x2[src]

impl Default for m64x2[src]

impl Eq for m64x2[src]

impl From<[m64; 2]> for m64x2[src]

impl From<Simd<[m128; 2]>> for m64x2[src]

impl From<Simd<[m16; 2]>> for m64x2[src]

impl From<Simd<[m32; 2]>> for m64x2[src]

impl From<Simd<[m8; 2]>> for m64x2[src]

impl FromBits<Simd<[m128; 1]>> for m64x2[src]

impl FromCast<Simd<[f32; 2]>> for m64x2[src]

impl FromCast<Simd<[f64; 2]>> for m64x2[src]

impl FromCast<Simd<[i128; 2]>> for m64x2[src]

impl FromCast<Simd<[i16; 2]>> for m64x2[src]

impl FromCast<Simd<[i32; 2]>> for m64x2[src]

impl FromCast<Simd<[i64; 2]>> for m64x2[src]

impl FromCast<Simd<[i8; 2]>> for m64x2[src]

impl FromCast<Simd<[isize; 2]>> for m64x2[src]

impl FromCast<Simd<[m128; 2]>> for m64x2[src]

impl FromCast<Simd<[m16; 2]>> for m64x2[src]

impl FromCast<Simd<[m32; 2]>> for m64x2[src]

impl FromCast<Simd<[m8; 2]>> for m64x2[src]

impl FromCast<Simd<[msize; 2]>> for m64x2[src]

impl FromCast<Simd<[u128; 2]>> for m64x2[src]

impl FromCast<Simd<[u16; 2]>> for m64x2[src]

impl FromCast<Simd<[u32; 2]>> for m64x2[src]

impl FromCast<Simd<[u64; 2]>> for m64x2[src]

impl FromCast<Simd<[u8; 2]>> for m64x2[src]

impl FromCast<Simd<[usize; 2]>> for m64x2[src]

impl Not for m64x2[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m64; 2]>> for m64x2[src]

impl Simd for m64x2[src]

type Element = m64

Element type of the SIMD vector.

type LanesType = [u32; 2]

The type: [u32; Self::N].
\ No newline at end of file
diff --git a/packed_simd/type.m64x4.html b/packed_simd/type.m64x4.html
new file mode 100644
index 000000000..85868b759
--- /dev/null
+++ b/packed_simd/type.m64x4.html
@@ -0,0 +1,105 @@
packed_simd::m64x4 - Rust

[][src]Type Definition packed_simd::m64x4

type m64x4 = Simd<[m64; 4]>;

A 256-bit vector mask with 4 m64 lanes.

+

Implementations

impl m64x4[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

impl m64x4[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl m64x4[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl m64x4[src]

pub fn eq(self, other: Self) -> m64x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x4[src]

Lane-wise greater-than-or-equals comparison.

impl m64x4[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m64; 4] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl m64x4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m64x4>[src]

Returns a wrapper that implements PartialOrd.

impl m64x4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m64x4>[src]

Returns a wrapper that implements Ord.

impl m64x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl m64x4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.
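
Example

Masks only implement PartialEq directly; the wrappers above add lexicographic comparisons. A sketch, assuming mask lanes order like bool (false < true):

    use packed_simd::*;

    fn main() {
        let a = m64x4::new(true, false, false, false);
        let b = m64x4::new(true, true, false, false);
        // The first differing lane (lane 1) decides the ordering.
        assert!(a.lex_ord() < b.lex_ord());
    }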

Trait Implementations

impl BitAnd<Simd<[m64; 4]>> for m64x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m64x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m64; 4]>> for m64x4[src]

impl BitAndAssign<bool> for m64x4[src]

impl BitOr<Simd<[m64; 4]>> for m64x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m64x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m64; 4]>> for m64x4[src]

impl BitOrAssign<bool> for m64x4[src]

impl BitXor<Simd<[m64; 4]>> for m64x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m64x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m64; 4]>> for m64x4[src]

impl BitXorAssign<bool> for m64x4[src]

impl Debug for m64x4[src]

impl Default for m64x4[src]

impl Eq for m64x4[src]

impl From<[m64; 4]> for m64x4[src]

impl From<Simd<[m16; 4]>> for m64x4[src]

impl From<Simd<[m32; 4]>> for m64x4[src]

impl From<Simd<[m8; 4]>> for m64x4[src]

impl FromBits<Simd<[m128; 2]>> for m64x4[src]

impl FromCast<Simd<[f32; 4]>> for m64x4[src]

impl FromCast<Simd<[f64; 4]>> for m64x4[src]

impl FromCast<Simd<[i128; 4]>> for m64x4[src]

impl FromCast<Simd<[i16; 4]>> for m64x4[src]

impl FromCast<Simd<[i32; 4]>> for m64x4[src]

impl FromCast<Simd<[i64; 4]>> for m64x4[src]

impl FromCast<Simd<[i8; 4]>> for m64x4[src]

impl FromCast<Simd<[isize; 4]>> for m64x4[src]

impl FromCast<Simd<[m128; 4]>> for m64x4[src]

impl FromCast<Simd<[m16; 4]>> for m64x4[src]

impl FromCast<Simd<[m32; 4]>> for m64x4[src]

impl FromCast<Simd<[m8; 4]>> for m64x4[src]

impl FromCast<Simd<[msize; 4]>> for m64x4[src]

impl FromCast<Simd<[u128; 4]>> for m64x4[src]

impl FromCast<Simd<[u16; 4]>> for m64x4[src]

impl FromCast<Simd<[u32; 4]>> for m64x4[src]

impl FromCast<Simd<[u64; 4]>> for m64x4[src]

impl FromCast<Simd<[u8; 4]>> for m64x4[src]

impl FromCast<Simd<[usize; 4]>> for m64x4[src]

impl Not for m64x4[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m64; 4]>> for m64x4[src]

impl Simd for m64x4[src]

type Element = m64

Element type of the SIMD vector.

type LanesType = [u32; 4]

The type: [u32; Self::N].
\ No newline at end of file
diff --git a/packed_simd/type.m64x8.html b/packed_simd/type.m64x8.html
new file mode 100644
index 000000000..400e7d6e2
--- /dev/null
+++ b/packed_simd/type.m64x8.html
@@ -0,0 +1,102 @@
packed_simd::m64x8 - Rust

[][src]Type Definition packed_simd::m64x8

type m64x8 = Simd<[m64; 8]>;

A 512-bit vector mask with 8 m64 lanes.

+

Implementations

impl m64x8[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

impl m64x8[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl m64x8[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl m64x8[src]

pub fn eq(self, other: Self) -> m64x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m64x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m64x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m64x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m64x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m64x8[src]

Lane-wise greater-than-or-equals comparison.

impl m64x8[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m64; 8] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl m64x8[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m64x8>[src]

Returns a wrapper that implements PartialOrd.

impl m64x8[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m64x8>[src]

Returns a wrapper that implements Ord.

impl m64x8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl m64x8[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

Trait Implementations

impl BitAnd<Simd<[m64; 8]>> for m64x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m64x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m64; 8]>> for m64x8[src]

impl BitAndAssign<bool> for m64x8[src]

impl BitOr<Simd<[m64; 8]>> for m64x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m64x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m64; 8]>> for m64x8[src]

impl BitOrAssign<bool> for m64x8[src]

impl BitXor<Simd<[m64; 8]>> for m64x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m64x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m64; 8]>> for m64x8[src]

impl BitXorAssign<bool> for m64x8[src]

impl Debug for m64x8[src]

impl Default for m64x8[src]

impl Eq for m64x8[src]

impl From<[m64; 8]> for m64x8[src]

impl From<Simd<[m16; 8]>> for m64x8[src]

impl From<Simd<[m32; 8]>> for m64x8[src]

impl From<Simd<[m8; 8]>> for m64x8[src]

impl FromBits<Simd<[m128; 4]>> for m64x8[src]

impl FromCast<Simd<[f32; 8]>> for m64x8[src]

impl FromCast<Simd<[f64; 8]>> for m64x8[src]

impl FromCast<Simd<[i16; 8]>> for m64x8[src]

impl FromCast<Simd<[i32; 8]>> for m64x8[src]

impl FromCast<Simd<[i64; 8]>> for m64x8[src]

impl FromCast<Simd<[i8; 8]>> for m64x8[src]

impl FromCast<Simd<[isize; 8]>> for m64x8[src]

impl FromCast<Simd<[m16; 8]>> for m64x8[src]

impl FromCast<Simd<[m32; 8]>> for m64x8[src]

impl FromCast<Simd<[m8; 8]>> for m64x8[src]

impl FromCast<Simd<[msize; 8]>> for m64x8[src]

impl FromCast<Simd<[u16; 8]>> for m64x8[src]

impl FromCast<Simd<[u32; 8]>> for m64x8[src]

impl FromCast<Simd<[u64; 8]>> for m64x8[src]

impl FromCast<Simd<[u8; 8]>> for m64x8[src]

impl FromCast<Simd<[usize; 8]>> for m64x8[src]

impl Not for m64x8[src]

type Output = Self

The resulting type after applying the ! operator.

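
Example

A sketch of the bitwise operator impls listed here; the bool variants splat the scalar across all lanes:

    use packed_simd::*;

    fn main() {
        let m = m64x8::splat(true);
        let n = !m; // Not: flips every lane
        assert!(n.none());
        assert_eq!(m & false, n);              // BitAnd<bool>
        assert_eq!(m | n, m64x8::splat(true)); // BitOr
        assert_eq!((m ^ m).any(), false);      // BitXor
    }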

impl PartialEq<Simd<[m64; 8]>> for m64x8[src]

impl Simd for m64x8[src]

type Element = m64

Element type of the SIMD vector.

type LanesType = [u32; 8]

The type: [u32; Self::N].
\ No newline at end of file
diff --git a/packed_simd/type.m8x16.html b/packed_simd/type.m8x16.html
new file mode 100644
index 000000000..616874e65
--- /dev/null
+++ b/packed_simd/type.m8x16.html
@@ -0,0 +1,96 @@
packed_simd::m8x16 - Rust

[][src]Type Definition packed_simd::m8x16

type m8x16 = Simd<[m8; 16]>;

A 128-bit vector mask with 16 m8 lanes.

+

Implementations

impl m8x16[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

impl m8x16[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl m8x16[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl m8x16[src]

pub fn eq(self, other: Self) -> m8x16[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x16[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x16[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x16[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x16[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x16[src]

Lane-wise greater-than-or-equals comparison.

impl m8x16[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 16] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl m8x16[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x16>[src]

Returns a wrapper that implements PartialOrd.

impl m8x16[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x16>[src]

Returns a wrapper that implements Ord.

impl m8x16[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl m8x16[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.
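
Example

With 16 lanes the bitmask is a u16. A sketch (assuming lane i is stored in bit i):

    use packed_simd::*;

    fn main() {
        let m = m8x16::splat(false).replace(0, true).replace(15, true);
        assert_eq!(m.bitmask(), 0b1000_0000_0000_0001u16);
    }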

Trait Implementations

impl BitAnd<Simd<[m8; 16]>> for m8x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m8x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m8; 16]>> for m8x16[src]

impl BitAndAssign<bool> for m8x16[src]

impl BitOr<Simd<[m8; 16]>> for m8x16[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m8x16[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m8; 16]>> for m8x16[src]

impl BitOrAssign<bool> for m8x16[src]

impl BitXor<Simd<[m8; 16]>> for m8x16[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m8x16[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m8; 16]>> for m8x16[src]

impl BitXorAssign<bool> for m8x16[src]

impl Debug for m8x16[src]

impl Default for m8x16[src]

impl Eq for m8x16[src]

impl From<[m8; 16]> for m8x16[src]

impl From<Simd<[m16; 16]>> for m8x16[src]

impl FromBits<Simd<[m128; 1]>> for m8x16[src]

impl FromBits<Simd<[m16; 8]>> for m8x16[src]

impl FromBits<Simd<[m32; 4]>> for m8x16[src]

impl FromBits<Simd<[m64; 2]>> for m8x16[src]

impl FromCast<Simd<[f32; 16]>> for m8x16[src]

impl FromCast<Simd<[i16; 16]>> for m8x16[src]

impl FromCast<Simd<[i32; 16]>> for m8x16[src]

impl FromCast<Simd<[i8; 16]>> for m8x16[src]

impl FromCast<Simd<[m16; 16]>> for m8x16[src]

impl FromCast<Simd<[m32; 16]>> for m8x16[src]

impl FromCast<Simd<[u16; 16]>> for m8x16[src]

impl FromCast<Simd<[u32; 16]>> for m8x16[src]

impl FromCast<Simd<[u8; 16]>> for m8x16[src]

impl Not for m8x16[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m8; 16]>> for m8x16[src]

impl Simd for m8x16[src]

type Element = m8

Element type of the SIMD vector.

type LanesType = [u32; 16]

The type: [u32; Self::N].
\ No newline at end of file
diff --git a/packed_simd/type.m8x2.html b/packed_simd/type.m8x2.html
new file mode 100644
index 000000000..2423e8c79
--- /dev/null
+++ b/packed_simd/type.m8x2.html
@@ -0,0 +1,105 @@
packed_simd::m8x2 - Rust

[][src]Type Definition packed_simd::m8x2

type m8x2 = Simd<[m8; 2]>;

A 16-bit vector mask with 2 m8 lanes.

+

Implementations

impl m8x2[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

impl m8x2[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl m8x2[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl m8x2[src]

pub fn eq(self, other: Self) -> m8x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x2[src]

Lane-wise greater-than-or-equals comparison.
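
Example

The lane-wise comparisons return a new mask rather than a bool (the inherent eq/ne take precedence over the PartialEq methods of the same name). A sketch:

    use packed_simd::*;

    fn main() {
        let a = m8x2::new(true, false);
        let b = m8x2::new(true, true);
        let eq = a.eq(b);
        assert_eq!(eq.extract(0), true);  // lanes agree
        assert_eq!(eq.extract(1), false); // lanes differ
        assert!(a.ne(b).any());
    }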

impl m8x2[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 2] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl m8x2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x2>[src]

Returns a wrapper that implements PartialOrd.

impl m8x2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x2>[src]

Returns a wrapper that implements Ord.

impl m8x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl m8x2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

Trait Implementations

impl BitAnd<Simd<[m8; 2]>> for m8x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m8x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m8; 2]>> for m8x2[src]

impl BitAndAssign<bool> for m8x2[src]

impl BitOr<Simd<[m8; 2]>> for m8x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m8x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m8; 2]>> for m8x2[src]

impl BitOrAssign<bool> for m8x2[src]

impl BitXor<Simd<[m8; 2]>> for m8x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m8x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m8; 2]>> for m8x2[src]

impl BitXorAssign<bool> for m8x2[src]

impl Debug for m8x2[src]

impl Default for m8x2[src]

impl Eq for m8x2[src]

impl From<[m8; 2]> for m8x2[src]

impl From<Simd<[m128; 2]>> for m8x2[src]

impl From<Simd<[m16; 2]>> for m8x2[src]

impl From<Simd<[m32; 2]>> for m8x2[src]

impl From<Simd<[m64; 2]>> for m8x2[src]

impl FromCast<Simd<[f32; 2]>> for m8x2[src]

impl FromCast<Simd<[f64; 2]>> for m8x2[src]

impl FromCast<Simd<[i128; 2]>> for m8x2[src]

impl FromCast<Simd<[i16; 2]>> for m8x2[src]

impl FromCast<Simd<[i32; 2]>> for m8x2[src]

impl FromCast<Simd<[i64; 2]>> for m8x2[src]

impl FromCast<Simd<[i8; 2]>> for m8x2[src]

impl FromCast<Simd<[isize; 2]>> for m8x2[src]

impl FromCast<Simd<[m128; 2]>> for m8x2[src]

impl FromCast<Simd<[m16; 2]>> for m8x2[src]

impl FromCast<Simd<[m32; 2]>> for m8x2[src]

impl FromCast<Simd<[m64; 2]>> for m8x2[src]

impl FromCast<Simd<[msize; 2]>> for m8x2[src]

impl FromCast<Simd<[u128; 2]>> for m8x2[src]

impl FromCast<Simd<[u16; 2]>> for m8x2[src]

impl FromCast<Simd<[u32; 2]>> for m8x2[src]

impl FromCast<Simd<[u64; 2]>> for m8x2[src]

impl FromCast<Simd<[u8; 2]>> for m8x2[src]

impl FromCast<Simd<[usize; 2]>> for m8x2[src]

impl Not for m8x2[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m8; 2]>> for m8x2[src]

impl Simd for m8x2[src]

type Element = m8

Element type of the SIMD vector.

type LanesType = [u32; 2]

The type: [u32; Self::N].
\ No newline at end of file
diff --git a/packed_simd/type.m8x32.html b/packed_simd/type.m8x32.html
new file mode 100644
index 000000000..5329067b6
--- /dev/null
+++ b/packed_simd/type.m8x32.html
@@ -0,0 +1,91 @@
packed_simd::m8x32 - Rust

[][src]Type Definition packed_simd::m8x32

type m8x32 = Simd<[m8; 32]>;

A 256-bit vector mask with 32 m8 lanes.

+

Implementations

impl m8x32[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool,
    x16: bool,
    x17: bool,
    x18: bool,
    x19: bool,
    x20: bool,
    x21: bool,
    x22: bool,
    x23: bool,
    x24: bool,
    x25: bool,
    x26: bool,
    x27: bool,
    x28: bool,
    x29: bool,
    x30: bool,
    x31: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

impl m8x32[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl m8x32[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl m8x32[src]

pub fn eq(self, other: Self) -> m8x32[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x32[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x32[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x32[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x32[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x32[src]

Lane-wise greater-than-or-equals comparison.

impl m8x32[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 32] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl m8x32[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x32>[src]

Returns a wrapper that implements PartialOrd.

impl m8x32[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x32>[src]

Returns a wrapper that implements Ord.

impl m8x32[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl m8x32[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

Trait Implementations

impl BitAnd<Simd<[m8; 32]>> for m8x32[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m8x32[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m8; 32]>> for m8x32[src]

impl BitAndAssign<bool> for m8x32[src]

impl BitOr<Simd<[m8; 32]>> for m8x32[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m8x32[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m8; 32]>> for m8x32[src]

impl BitOrAssign<bool> for m8x32[src]

impl BitXor<Simd<[m8; 32]>> for m8x32[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m8x32[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m8; 32]>> for m8x32[src]

impl BitXorAssign<bool> for m8x32[src]

impl Debug for m8x32[src]

impl Default for m8x32[src]

impl Eq for m8x32[src]

impl From<[m8; 32]> for m8x32[src]

impl FromBits<Simd<[m128; 2]>> for m8x32[src]

impl FromBits<Simd<[m16; 16]>> for m8x32[src]

impl FromBits<Simd<[m32; 8]>> for m8x32[src]

impl FromBits<Simd<[m64; 4]>> for m8x32[src]

impl FromCast<Simd<[i16; 32]>> for m8x32[src]

impl FromCast<Simd<[i8; 32]>> for m8x32[src]

impl FromCast<Simd<[m16; 32]>> for m8x32[src]

impl FromCast<Simd<[u16; 32]>> for m8x32[src]

impl FromCast<Simd<[u8; 32]>> for m8x32[src]

impl Not for m8x32[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m8; 32]>> for m8x32[src]

impl Simd for m8x32[src]

type Element = m8

Element type of the SIMD vector.

type LanesType = [u32; 32]

The type: [u32; Self::N].
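
Example

The FromBits impls above reinterpret the underlying 256 bits instead of converting lane by lane. A sketch, assuming FromBits exposes a from_bits constructor and that a true mask lane sets all of its bits:

    use packed_simd::*;

    fn main() {
        let wide_lanes = m16x16::splat(true);
        let narrow_lanes = m8x32::from_bits(wide_lanes);
        assert!(narrow_lanes.all());
    }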
\ No newline at end of file
diff --git a/packed_simd/type.m8x4.html b/packed_simd/type.m8x4.html
new file mode 100644
index 000000000..7f4d01c83
--- /dev/null
+++ b/packed_simd/type.m8x4.html
@@ -0,0 +1,105 @@
packed_simd::m8x4 - Rust

[][src]Type Definition packed_simd::m8x4

type m8x4 = Simd<[m8; 4]>;

A 32-bit vector mask with 4 m8 lanes.

+

Implementations

impl m8x4[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

impl m8x4[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl m8x4[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl m8x4[src]

pub fn eq(self, other: Self) -> m8x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x4[src]

Lane-wise greater-than-or-equals comparison.

impl m8x4[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 4] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl m8x4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x4>[src]

Returns a wrapper that implements PartialOrd.

impl m8x4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x4>[src]

Returns a wrapper that implements Ord.

impl m8x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl m8x4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.
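
Example

A mask's lane width must match the element width of the vectors it selects from, so m8x4 pairs with the 8-bit u8x4. A sketch:

    use packed_simd::*;

    fn main() {
        let m = m8x4::new(true, true, false, false);
        let a = u8x4::new(1, 2, 3, 4);
        let b = u8x4::new(5, 6, 7, 8);
        assert_eq!(m.select(a, b), u8x4::new(1, 2, 7, 8));
    }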

Trait Implementations

impl BitAnd<Simd<[m8; 4]>> for m8x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m8x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m8; 4]>> for m8x4[src]

impl BitAndAssign<bool> for m8x4[src]

impl BitOr<Simd<[m8; 4]>> for m8x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m8x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m8; 4]>> for m8x4[src]

impl BitOrAssign<bool> for m8x4[src]

impl BitXor<Simd<[m8; 4]>> for m8x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m8x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m8; 4]>> for m8x4[src]

impl BitXorAssign<bool> for m8x4[src]

impl Debug for m8x4[src]

impl Default for m8x4[src]

impl Eq for m8x4[src]

impl From<[m8; 4]> for m8x4[src]

impl From<Simd<[m16; 4]>> for m8x4[src]

impl From<Simd<[m32; 4]>> for m8x4[src]

impl From<Simd<[m64; 4]>> for m8x4[src]

impl FromBits<Simd<[m16; 2]>> for m8x4[src]

impl FromCast<Simd<[f32; 4]>> for m8x4[src]

impl FromCast<Simd<[f64; 4]>> for m8x4[src]

impl FromCast<Simd<[i128; 4]>> for m8x4[src]

impl FromCast<Simd<[i16; 4]>> for m8x4[src]

impl FromCast<Simd<[i32; 4]>> for m8x4[src]

impl FromCast<Simd<[i64; 4]>> for m8x4[src]

impl FromCast<Simd<[i8; 4]>> for m8x4[src]

impl FromCast<Simd<[isize; 4]>> for m8x4[src]

impl FromCast<Simd<[m128; 4]>> for m8x4[src]

impl FromCast<Simd<[m16; 4]>> for m8x4[src]

impl FromCast<Simd<[m32; 4]>> for m8x4[src]

impl FromCast<Simd<[m64; 4]>> for m8x4[src]

impl FromCast<Simd<[msize; 4]>> for m8x4[src]

impl FromCast<Simd<[u128; 4]>> for m8x4[src]

impl FromCast<Simd<[u16; 4]>> for m8x4[src]

impl FromCast<Simd<[u32; 4]>> for m8x4[src]

impl FromCast<Simd<[u64; 4]>> for m8x4[src]

impl FromCast<Simd<[u8; 4]>> for m8x4[src]

impl FromCast<Simd<[usize; 4]>> for m8x4[src]

impl Not for m8x4[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m8; 4]>> for m8x4[src]

impl Simd for m8x4[src]

type Element = m8

Element type of the SIMD vector.

type LanesType = [u32; 4]

The type: [u32; Self::N].
\ No newline at end of file
diff --git a/packed_simd/type.m8x64.html b/packed_simd/type.m8x64.html
new file mode 100644
index 000000000..2974d9f50
--- /dev/null
+++ b/packed_simd/type.m8x64.html
@@ -0,0 +1,88 @@
packed_simd::m8x64 - Rust

[][src]Type Definition packed_simd::m8x64

type m8x64 = Simd<[m8; 64]>;

A 512-bit vector mask with 64 m8 lanes.

+

Implementations

impl m8x64[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool,
    x8: bool,
    x9: bool,
    x10: bool,
    x11: bool,
    x12: bool,
    x13: bool,
    x14: bool,
    x15: bool,
    x16: bool,
    x17: bool,
    x18: bool,
    x19: bool,
    x20: bool,
    x21: bool,
    x22: bool,
    x23: bool,
    x24: bool,
    x25: bool,
    x26: bool,
    x27: bool,
    x28: bool,
    x29: bool,
    x30: bool,
    x31: bool,
    x32: bool,
    x33: bool,
    x34: bool,
    x35: bool,
    x36: bool,
    x37: bool,
    x38: bool,
    x39: bool,
    x40: bool,
    x41: bool,
    x42: bool,
    x43: bool,
    x44: bool,
    x45: bool,
    x46: bool,
    x47: bool,
    x48: bool,
    x49: bool,
    x50: bool,
    x51: bool,
    x52: bool,
    x53: bool,
    x54: bool,
    x55: bool,
    x56: bool,
    x57: bool,
    x58: bool,
    x59: bool,
    x60: bool,
    x61: bool,
    x62: bool,
    x63: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

impl m8x64[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl m8x64[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl m8x64[src]

pub fn eq(self, other: Self) -> m8x64[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x64[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x64[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x64[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x64[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x64[src]

Lane-wise greater-than-or-equals comparison.

impl m8x64[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 64] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl m8x64[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x64>[src]

Returns a wrapper that implements PartialOrd.

impl m8x64[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x64>[src]

Returns a wrapper that implements Ord.

impl m8x64[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl m8x64[src]

pub fn bitmask(self) -> u64[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.
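
Example

Wide masks usually come out of lane-wise comparisons on data vectors rather than from new. A sketch:

    use packed_simd::*;

    fn main() {
        let data = u8x64::splat(7);
        let mask = data.gt(u8x64::splat(3)); // u8x64 comparison -> m8x64
        assert!(mask.all());
        assert_eq!(mask.bitmask(), u64::max_value());
    }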

Trait Implementations

impl BitAnd<Simd<[m8; 64]>> for m8x64[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m8x64[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m8; 64]>> for m8x64[src]

impl BitAndAssign<bool> for m8x64[src]

impl BitOr<Simd<[m8; 64]>> for m8x64[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m8x64[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m8; 64]>> for m8x64[src]

impl BitOrAssign<bool> for m8x64[src]

impl BitXor<Simd<[m8; 64]>> for m8x64[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m8x64[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m8; 64]>> for m8x64[src]

impl BitXorAssign<bool> for m8x64[src]

impl Debug for m8x64[src]

impl Default for m8x64[src]

impl Eq for m8x64[src]

impl From<[m8; 64]> for m8x64[src]

impl FromBits<Simd<[m128; 4]>> for m8x64[src]

impl FromBits<Simd<[m16; 32]>> for m8x64[src]

impl FromBits<Simd<[m32; 16]>> for m8x64[src]

impl FromBits<Simd<[m64; 8]>> for m8x64[src]

impl FromCast<Simd<[i8; 64]>> for m8x64[src]

impl FromCast<Simd<[u8; 64]>> for m8x64[src]

impl Not for m8x64[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m8; 64]>> for m8x64[src]

impl Simd for m8x64[src]

type Element = m8

Element type of the SIMD vector.

type LanesType = [u32; 64]

The type: [u32; Self::N].
\ No newline at end of file
diff --git a/packed_simd/type.m8x8.html b/packed_simd/type.m8x8.html
new file mode 100644
index 000000000..26ed77278
--- /dev/null
+++ b/packed_simd/type.m8x8.html
@@ -0,0 +1,102 @@
packed_simd::m8x8 - Rust

[][src]Type Definition packed_simd::m8x8

type m8x8 = Simd<[m8; 8]>;

A 64-bit vector mask with 8 m8 lanes.

+

Implementations

impl m8x8[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

impl m8x8[src]

pub fn and(self) -> bool[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl m8x8[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

pub fn any(self) -> bool[src]

Is any vector lane true?

pub fn none(self) -> bool[src]

Are all vector lanes false?

impl m8x8[src]

pub fn eq(self, other: Self) -> m8x8[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x8[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x8[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x8[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x8[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x8[src]

Lane-wise greater-than-or-equals comparison.

impl m8x8[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[m8; 8] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.

impl m8x8[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<m8x8>[src]

Returns a wrapper that implements PartialOrd.

impl m8x8[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<m8x8>[src]

Returns a wrapper that implements Ord.

impl m8x8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

impl m8x8[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

Trait Implementations

impl BitAnd<Simd<[m8; 8]>> for m8x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for m8x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[m8; 8]>> for m8x8[src]

impl BitAndAssign<bool> for m8x8[src]

impl BitOr<Simd<[m8; 8]>> for m8x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for m8x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[m8; 8]>> for m8x8[src]

impl BitOrAssign<bool> for m8x8[src]

impl BitXor<Simd<[m8; 8]>> for m8x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for m8x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[m8; 8]>> for m8x8[src]

impl BitXorAssign<bool> for m8x8[src]

impl Debug for m8x8[src]

impl Default for m8x8[src]

impl Eq for m8x8[src]

impl From<[m8; 8]> for m8x8[src]

impl From<Simd<[m16; 8]>> for m8x8[src]

impl From<Simd<[m32; 8]>> for m8x8[src]

impl FromBits<Simd<[m16; 4]>> for m8x8[src]

impl FromBits<Simd<[m32; 2]>> for m8x8[src]

impl FromCast<Simd<[f32; 8]>> for m8x8[src]

impl FromCast<Simd<[f64; 8]>> for m8x8[src]

impl FromCast<Simd<[i16; 8]>> for m8x8[src]

impl FromCast<Simd<[i32; 8]>> for m8x8[src]

impl FromCast<Simd<[i64; 8]>> for m8x8[src]

impl FromCast<Simd<[i8; 8]>> for m8x8[src]

impl FromCast<Simd<[isize; 8]>> for m8x8[src]

impl FromCast<Simd<[m16; 8]>> for m8x8[src]

impl FromCast<Simd<[m32; 8]>> for m8x8[src]

impl FromCast<Simd<[m64; 8]>> for m8x8[src]

impl FromCast<Simd<[msize; 8]>> for m8x8[src]

impl FromCast<Simd<[u16; 8]>> for m8x8[src]

impl FromCast<Simd<[u32; 8]>> for m8x8[src]

impl FromCast<Simd<[u64; 8]>> for m8x8[src]

impl FromCast<Simd<[u8; 8]>> for m8x8[src]

impl FromCast<Simd<[usize; 8]>> for m8x8[src]

impl Not for m8x8[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[m8; 8]>> for m8x8[src]

impl Simd for m8x8[src]

type Element = m8

Element type of the SIMD vector.

type LanesType = [u32; 8]

The type: [u32; Self::N].
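
Example

A sketch of the lane-preserving narrowing conversion from m16x8 listed above:

    use packed_simd::*;

    fn main() {
        let wide = m16x8::new(true, false, true, false, true, false, true, false);
        let narrow = m8x8::from(wide);
        // Assuming bitmask stores lane i in bit i:
        assert_eq!(narrow.bitmask(), 0b0101_0101);
    }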
\ No newline at end of file
diff --git a/packed_simd/type.mptrx2.html b/packed_simd/type.mptrx2.html
new file mode 100644
index 000000000..eb4825051
--- /dev/null
+++ b/packed_simd/type.mptrx2.html
@@ -0,0 +1,284 @@
packed_simd::mptrx2 - Rust

[][src]Type Definition packed_simd::mptrx2

type mptrx2<T> = Simd<[*mut T; 2]>;

A vector with 2 *mut T lanes.

+

Implementations

impl<T> mptrx2<T>[src]

pub const fn new(x0: *mut T, x1: *mut T) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: *mut T) -> Self[src]

Constructs a new instance with each element initialized to value.

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to null.

pub fn is_null(self) -> msizex2[src]

Returns a mask that selects those lanes that contain null pointers.

pub fn extract(self, index: usize) -> *mut T[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> *mut T[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.
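
Example

A sketch of building a pointer vector and testing it lane-wise:

    use packed_simd::*;

    fn main() {
        let v = mptrx2::<i32>::null();
        assert!(v.is_null().all());

        let mut x = 0i32;
        let w = v.replace(0, &mut x as *mut i32);
        assert_eq!(w.is_null().extract(0), false);
        assert_eq!(w.is_null().extract(1), true);
    }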

impl<T> mptrx2<T>[src]

pub fn eq(self, other: Self) -> msizex2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> msizex2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> msizex2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> msizex2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> msizex2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> msizex2[src]

Lane-wise greater-than-or-equals comparison.

impl<T> mptrx2<T>[src]

pub fn from_slice_aligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl<T> mptrx2<T>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
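The slice constructors and writers round-trip as one would expect. A minimal sketch, assuming packed_simd and using the unaligned variants, which only require slice.len() >= Self::lanes() (roundtrip is a hypothetical name):

    // Illustrative sketch, not from the packed_simd docs.
    use packed_simd::mptrx2;

    fn roundtrip(p: *mut f32, q: *mut f32) {
        let src = [p, q];
        let v = mptrx2::from_slice_unaligned(&src);
        let mut dst = [core::ptr::null_mut(); 2];
        v.write_to_slice_unaligned(&mut dst);
        assert_eq!(dst, src);
    }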

impl<T> mptrx2<T>[src]

pub unsafe fn offset(self, count: isizex2) -> Self[src]

Calculates the offset from a pointer.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum, in bytes, must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset(self, count: isizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .offset(count) instead when possible, because +offset allows the compiler to optimize better.

+

pub unsafe fn offset_from(self, origin: Self) -> isizex2[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

This function is the inverse of offset.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and other pointer must be either in bounds or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow an isize.

  • The distance between the pointers, in bytes, must be an exact multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around" the address space.

The compiler and standard library generally try to ensure +allocations never reach a size where an offset is a concern. For +instance, Vec and Box ensure they never allocate more than +isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) +is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset_from instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset_from(self, origin: Self) -> isizex2[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

If the address difference between the two pointers is not a multiple of mem::size_of::<T>(), then the result of the division is rounded towards zero.

+

Though this method is safe for any two pointers, note that its +result will be mostly useless if the two pointers aren't into +the same allocated object, for example if they point to two +different local variables.

+
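Since wrapping_offset_from is safe, lane-wise element distances can be computed without unsafe, as in this hedged sketch (distances is a hypothetical name; assumes packed_simd):

    // Illustrative sketch, not from the packed_simd docs.
    use packed_simd::{isizex2, mptrx2};

    fn distances(buf: &mut [u64; 8]) -> isizex2 {
        let p = buf.as_mut_ptr();
        let base = mptrx2::splat(p);
        // wrapping_add on the scalar pointers keeps this function safe.
        let later = mptrx2::new(p.wrapping_add(2), p.wrapping_add(7));
        later.wrapping_offset_from(base) // lane-wise distances: [2, 7]
    }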

pub unsafe fn add(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub unsafe fn sub(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_add(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .add(count) instead when possible, because add +allows the compiler to optimize better.

+

pub fn wrapping_sub(self, count: usizex2) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .sub(count) instead when possible, because sub +allows the compiler to optimize better.

+
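Putting the pointer arithmetic together, here is a hedged sketch of lane-wise add under the same in-bounds rules as scalar pointer add (advance is a hypothetical name; assumes packed_simd):

    // Illustrative sketch, not from the packed_simd docs.
    use packed_simd::{mptrx2, usizex2};

    fn advance(buf: &mut [u32; 8]) {
        let ptrs = mptrx2::splat(buf.as_mut_ptr());
        // Advance lane 0 by 1 element and lane 1 by 4; both stay in bounds.
        let moved = unsafe { ptrs.add(usizex2::new(1, 4)) };
        unsafe {
            *moved.extract(0) = 10; // writes buf[1]
            *moved.extract(1) = 40; // writes buf[4]
        }
    }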

impl<T> mptrx2<T>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl<T> mptrx2<T> where
    [T; 2]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 2]>,
    value: Simd<[T; 2]>
) -> Simd<[T; 2]> where
    M: Mask,
    [M; 2]: SimdArray
[src]

Reads selected vector elements from memory.

+

Instantiates a new vector by reading the values from self for +those lanes whose mask is true, and using the elements of +value otherwise.

+

No memory is accessed for those lanes of self whose mask is +false.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The +pointers must be aligned to mem::align_of::<T>().

+
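A masked gather in this style never touches memory in the false lanes, so a null pointer there is harmless. An illustrative sketch (gather_first is a hypothetical name; assumes packed_simd):

    // Illustrative sketch, not from the packed_simd docs.
    use packed_simd::{i64x2, mptrx2, msizex2};

    unsafe fn gather_first(p: *mut i64) -> i64x2 {
        let ptrs = mptrx2::new(p, core::ptr::null_mut());
        // Lane 1 is masked off, so its null pointer is never dereferenced.
        ptrs.read(msizex2::new(true, false), i64x2::splat(-1)) // [*p, -1]
    }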

impl<T> mptrx2<T> where
    [T; 2]: SimdArray
[src]

pub unsafe fn write<M>(self, mask: Simd<[M; 2]>, value: Simd<[T; 2]>) where
    M: Mask,
    [M; 2]: SimdArray
[src]

Writes selected vector elements to memory.

+

Writes the lanes of value for which the mask is true to their corresponding memory addresses in self.

+

No memory is accessed for those lanes of self whose mask is +false.

+

Overlapping memory addresses of self are written to in order from the least-significant to the most-significant element.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The +pointers must be aligned to mem::align_of::<T>().

+
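The masked scatter is symmetric: only lanes whose mask is true are stored. Another illustrative sketch (scatter_one is a hypothetical name; assumes packed_simd):

    // Illustrative sketch, not from the packed_simd docs.
    use packed_simd::{i32x2, mptrx2, msizex2};

    unsafe fn scatter_one(p: *mut i32, vals: i32x2) {
        let ptrs = mptrx2::new(p, core::ptr::null_mut());
        // Only lane 0 is written; lane 1's null pointer is never dereferenced.
        ptrs.write(msizex2::new(true, false), vals);
    }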

Trait Implementations

impl<T> Debug for mptrx2<T>[src]

impl<T> Default for mptrx2<T>[src]

impl<T> Eq for mptrx2<T>[src]

impl<T> From<[*mut T; 2]> for mptrx2<T>[src]

impl<T> Hash for mptrx2<T>[src]

impl<T> Into<[*mut T; 2]> for mptrx2<T>[src]

impl<T> PartialEq<Simd<[*mut T; 2]>> for mptrx2<T>[src]

impl<T> Simd for mptrx2<T>[src]

type Element = *mut T

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+
\ No newline at end of file diff --git a/packed_simd/type.mptrx4.html b/packed_simd/type.mptrx4.html new file mode 100644 index 000000000..77201519f --- /dev/null +++ b/packed_simd/type.mptrx4.html @@ -0,0 +1,284 @@
packed_simd::mptrx4 - Rust

[][src]Type Definition packed_simd::mptrx4

type mptrx4<T> = Simd<[*mut T; 4]>;

A vector with 4 *mut T lanes

+

Implementations

impl<T> mptrx4<T>[src]

pub const fn new(x0: *mut T, x1: *mut T, x2: *mut T, x3: *mut T) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: *mut T) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to +null.

+

pub fn is_null(self) -> msizex4[src]

Returns a mask that selects those lanes that contain null +pointers.

+

pub fn extract(self, index: usize) -> *mut T[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> *mut T[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl<T> mptrx4<T>[src]

pub fn eq(self, other: Self) -> msizex4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex4[src]

Lane-wise greater-than-or-equals comparison.

+

impl<T> mptrx4<T>[src]

pub fn from_slice_aligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> mptrx4<T>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> mptrx4<T>[src]

pub unsafe fn offset(self, count: isizex4) -> Self[src]

Calculates the offset from a pointer.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum, in bytes, must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset(self, count: isizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .offset(count) instead when possible, because +offset allows the compiler to optimize better.

+

pub unsafe fn offset_from(self, origin: Self) -> isizex4[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

This function is the inverse of offset.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and other pointer must be either in bounds or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow an isize.

  • The distance between the pointers, in bytes, must be an exact multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around" the address space.

The compiler and standard library generally try to ensure +allocations never reach a size where an offset is a concern. For +instance, Vec and Box ensure they never allocate more than +isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) +is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset_from instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset_from(self, origin: Self) -> isizex4[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

If the address difference between the two pointers is not a multiple of mem::size_of::<T>(), then the result of the division is rounded towards zero.

+

Though this method is safe for any two pointers, note that its +result will be mostly useless if the two pointers aren't into +the same allocated object, for example if they point to two +different local variables.

+

pub unsafe fn add(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub unsafe fn sub(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_add(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .add(count) instead when possible, because add +allows the compiler to optimize better.

+

pub fn wrapping_sub(self, count: usizex4) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .sub(count) instead when possible, because sub +allows the compiler to optimize better.

+

impl<T> mptrx4<T>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl<T> mptrx4<T> where
    [T; 4]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 4]>,
    value: Simd<[T; 4]>
) -> Simd<[T; 4]> where
    M: Mask,
    [M; 4]: SimdArray
[src]

Reads selected vector elements from memory.

+

Instantiates a new vector by reading the values from self for +those lanes whose mask is true, and using the elements of +value otherwise.

+

No memory is accessed for those lanes of self whose mask is +false.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The +pointers must be aligned to mem::align_of::<T>().

+

impl<T> mptrx4<T> where
    [T; 4]: SimdArray
[src]

pub unsafe fn write<M>(self, mask: Simd<[M; 4]>, value: Simd<[T; 4]>) where
    M: Mask,
    [M; 4]: SimdArray
[src]

Writes selected vector elements to memory.

+

Writes the lanes of value for which the mask is true to their corresponding memory addresses in self.

+

No memory is accessed for those lanes of self whose mask is +false.

+

Overlapping memory addresses of self are written to in order from the least-significant to the most-significant element.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The +pointers must be aligned to mem::align_of::<T>().

+

Trait Implementations

impl<T> Debug for mptrx4<T>[src]

impl<T> Default for mptrx4<T>[src]

impl<T> Eq for mptrx4<T>[src]

impl<T> From<[*mut T; 4]> for mptrx4<T>[src]

impl<T> Hash for mptrx4<T>[src]

impl<T> Into<[*mut T; 4]> for mptrx4<T>[src]

impl<T> PartialEq<Simd<[*mut T; 4]>> for mptrx4<T>[src]

impl<T> Simd for mptrx4<T>[src]

type Element = *mut T

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+
\ No newline at end of file diff --git a/packed_simd/type.mptrx8.html b/packed_simd/type.mptrx8.html new file mode 100644 index 000000000..e5c6510de --- /dev/null +++ b/packed_simd/type.mptrx8.html @@ -0,0 +1,284 @@
packed_simd::mptrx8 - Rust

[][src]Type Definition packed_simd::mptrx8

type mptrx8<T> = Simd<[*mut T; 8]>;

A vector with 8 *mut T lanes

+

Implementations

impl<T> mptrx8<T>[src]

pub const fn new(
    x0: *mut T,
    x1: *mut T,
    x2: *mut T,
    x3: *mut T,
    x4: *mut T,
    x5: *mut T,
    x6: *mut T,
    x7: *mut T
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: *mut T) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub const fn null() -> Self[src]

Constructs a new instance with each element initialized to +null.

+

pub fn is_null(self) -> msizex8[src]

Returns a mask that selects those lanes that contain null +pointers.

+

pub fn extract(self, index: usize) -> *mut T[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> *mut T[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: *mut T) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl<T> mptrx8<T>[src]

pub fn eq(self, other: Self) -> msizex8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex8[src]

Lane-wise greater-than-or-equals comparison.

+

impl<T> mptrx8<T>[src]

pub fn from_slice_aligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[*mut T]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> mptrx8<T>[src]

pub fn write_to_slice_aligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [*mut T])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl<T> mptrx8<T>[src]

pub unsafe fn offset(self, count: isizex8) -> Self[src]

Calculates the offset from a pointer.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum, in bytes, must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().offset(vec.len() as isize) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset(self, count: isizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic.

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .offset(count) instead when possible, because +offset allows the compiler to optimize better.

+

pub unsafe fn offset_from(self, origin: Self) -> isizex8[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

This function is the inverse of offset.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and other pointer must be either in bounds or one byte past the end of the same allocated object.

  • The distance between the pointers, in bytes, cannot overflow an isize.

  • The distance between the pointers, in bytes, must be an exact multiple of the size of T.

  • The distance being in bounds cannot rely on "wrapping around" the address space.

The compiler and standard library generally try to ensure +allocations never reach a size where an offset is a concern. For +instance, Vec and Box ensure they never allocate more than +isize::MAX bytes, so ptr_into_vec.offset_from(vec.as_ptr()) +is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset_from instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_offset_from(self, origin: Self) -> isizex8[src]

Calculates the distance between two pointers.

+

The returned value is in units of T: the distance in bytes is +divided by mem::size_of::<T>().

+

If the address difference between the two pointers is not a multiple of mem::size_of::<T>(), then the result of the division is rounded towards zero.

+

Though this method is safe for any two pointers, note that its +result will be mostly useless if the two pointers aren't into +the same allocated object, for example if they point to two +different local variables.

+

pub unsafe fn add(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset, in bytes, cannot overflow an isize.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub unsafe fn sub(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer (convenience for +.offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

If any of the following conditions are violated, the result is Undefined Behavior:

  • Both the starting and resulting pointer must be either in bounds or one byte past the end of an allocated object.

  • The computed offset cannot exceed isize::MAX bytes.

  • The offset being in bounds cannot rely on "wrapping around" the address space. That is, the infinite-precision sum must fit in a usize.

The compiler and standard library generally try to ensure allocations never reach a size where an offset is a concern. For instance, Vec and Box ensure they never allocate more than isize::MAX bytes, so vec.as_ptr().add(vec.len()).sub(vec.len()) is always safe.

+

Most platforms fundamentally can't even construct such an allocation. For instance, no known 64-bit platform can ever serve a request for 2^63 bytes due to page-table limitations or splitting the address space. However, some 32-bit and 16-bit platforms may successfully serve a request for more than isize::MAX bytes with things like Physical Address Extension. As such, memory acquired directly from allocators or memory mapped files may be too large to handle with this function.

+

Consider using wrapping_offset instead if these constraints +are difficult to satisfy. The only advantage of this method is +that it enables more aggressive compiler optimizations.

+

pub fn wrapping_add(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset(count as isize)).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .add(count) instead when possible, because add +allows the compiler to optimize better.

+

pub fn wrapping_sub(self, count: usizex8) -> Self[src]

Calculates the offset from a pointer using wrapping arithmetic (convenience for .wrapping_offset((count as isize).wrapping_neg())).

+

count is in units of T; e.g. a count of 3 represents a +pointer offset of 3 * size_of::<T>() bytes.

+

Safety

+

The resulting pointer does not need to be in bounds, but it is +potentially hazardous to dereference (which requires unsafe).

+

Always use .sub(count) instead when possible, because sub +allows the compiler to optimize better.

+

impl<T> mptrx8<T>[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl<T> mptrx8<T> where
    [T; 8]: SimdArray
[src]

pub unsafe fn read<M>(
    self,
    mask: Simd<[M; 8]>,
    value: Simd<[T; 8]>
) -> Simd<[T; 8]> where
    M: Mask,
    [M; 8]: SimdArray
[src]

Reads selected vector elements from memory.

+

Instantiates a new vector by reading the values from self for +those lanes whose mask is true, and using the elements of +value otherwise.

+

No memory is accessed for those lanes of self whose mask is +false.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The +pointers must be aligned to mem::align_of::<T>().

+

impl<T> mptrx8<T> where
    [T; 8]: SimdArray
[src]

pub unsafe fn write<M>(self, mask: Simd<[M; 8]>, value: Simd<[T; 8]>) where
    M: Mask,
    [M; 8]: SimdArray
[src]

Writes selected vector elements to memory.

+

Writes the lanes of value for which the mask is true to their corresponding memory addresses in self.

+

No memory is accessed for those lanes of self whose mask is +false.

+

Overlapping memory addresses of self are written to in order from the least-significant to the most-significant element.

+

Safety

+

This method is unsafe because it dereferences raw pointers. The +pointers must be aligned to mem::align_of::<T>().

+

Trait Implementations

impl<T> Debug for mptrx8<T>[src]

impl<T> Default for mptrx8<T>[src]

impl<T> Eq for mptrx8<T>[src]

impl<T> From<[*mut T; 8]> for mptrx8<T>[src]

impl<T> Hash for mptrx8<T>[src]

impl<T> Into<[*mut T; 8]> for mptrx8<T>[src]

impl<T> PartialEq<Simd<[*mut T; 8]>> for mptrx8<T>[src]

impl<T> Simd for mptrx8<T>[src]

type Element = *mut T

Element type of the SIMD vector

+

type LanesType = [u32; 8]

The type: [u32; Self::N].

+
\ No newline at end of file diff --git a/packed_simd/type.msizex2.html b/packed_simd/type.msizex2.html new file mode 100644 index 000000000..f247c4ee3 --- /dev/null +++ b/packed_simd/type.msizex2.html @@ -0,0 +1,101 @@
packed_simd::msizex2 - Rust

[][src]Type Definition packed_simd::msizex2

type msizex2 = Simd<[msize; 2]>;

A vector mask with 2 msize lanes.

+

Implementations

impl msizex2[src]

pub const fn new(x0: bool, x1: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl msizex2[src]

pub fn and(self) -> bool[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> bool[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> bool[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl msizex2[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+
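The three boolean reductions compose naturally; a minimal sketch, assuming packed_simd as a dependency (reductions is a hypothetical name):

    // Illustrative sketch, not from the packed_simd docs.
    use packed_simd::msizex2;

    fn reductions() {
        let m = msizex2::new(true, false);
        assert!(m.any());   // at least one lane is true
        assert!(!m.all());  // not every lane is true
        assert!(!m.none()); // not every lane is false
    }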

impl msizex2[src]

pub fn eq(self, other: Self) -> msizex2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex2[src]

Lane-wise greater-than-or-equals comparison.

+

impl msizex2[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[msize; 2] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain +the values of a. The remaining lanes contain the values of +b.

+
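select is the lane-wise conditional: true lanes come from the first argument, false lanes from the second. A hedged sketch (pick is a hypothetical name; assumes packed_simd):

    // Illustrative sketch, not from the packed_simd docs.
    use packed_simd::{isizex2, msizex2};

    fn pick() -> isizex2 {
        let m = msizex2::new(true, false);
        // Lane 0 comes from the first argument, lane 1 from the second.
        m.select(isizex2::new(1, 2), isizex2::new(10, 20)) // [1, 20]
    }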

impl msizex2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<msizex2>[src]

Returns a wrapper that implements PartialOrd.

+

impl msizex2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<msizex2>[src]

Returns a wrapper that implements Ord.

+

impl msizex2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl msizex2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not +correspond to any vector lanes are cleared.

+
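For example, a two-lane mask packs into the low two bits of the returned u8, lane 0 in bit 0. A minimal sketch (to_bits is a hypothetical name; assumes packed_simd):

    // Illustrative sketch, not from the packed_simd docs.
    use packed_simd::msizex2;

    fn to_bits() -> u8 {
        // Unused high bits are cleared.
        msizex2::new(true, false).bitmask() // 0b0000_0001
    }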

Trait Implementations

impl BitAnd<Simd<[msize; 2]>> for msizex2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for msizex2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[msize; 2]>> for msizex2[src]

impl BitAndAssign<bool> for msizex2[src]

impl BitOr<Simd<[msize; 2]>> for msizex2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for msizex2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[msize; 2]>> for msizex2[src]

impl BitOrAssign<bool> for msizex2[src]

impl BitXor<Simd<[msize; 2]>> for msizex2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for msizex2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[msize; 2]>> for msizex2[src]

impl BitXorAssign<bool> for msizex2[src]

impl Debug for msizex2[src]

impl Default for msizex2[src]

impl Eq for msizex2[src]

impl From<[msize; 2]> for msizex2[src]

impl FromCast<Simd<[f32; 2]>> for msizex2[src]

impl FromCast<Simd<[f64; 2]>> for msizex2[src]

impl FromCast<Simd<[i128; 2]>> for msizex2[src]

impl FromCast<Simd<[i16; 2]>> for msizex2[src]

impl FromCast<Simd<[i32; 2]>> for msizex2[src]

impl FromCast<Simd<[i64; 2]>> for msizex2[src]

impl FromCast<Simd<[i8; 2]>> for msizex2[src]

impl FromCast<Simd<[isize; 2]>> for msizex2[src]

impl FromCast<Simd<[m128; 2]>> for msizex2[src]

impl FromCast<Simd<[m16; 2]>> for msizex2[src]

impl FromCast<Simd<[m32; 2]>> for msizex2[src]

impl FromCast<Simd<[m64; 2]>> for msizex2[src]

impl FromCast<Simd<[m8; 2]>> for msizex2[src]

impl FromCast<Simd<[u128; 2]>> for msizex2[src]

impl FromCast<Simd<[u16; 2]>> for msizex2[src]

impl FromCast<Simd<[u32; 2]>> for msizex2[src]

impl FromCast<Simd<[u64; 2]>> for msizex2[src]

impl FromCast<Simd<[u8; 2]>> for msizex2[src]

impl FromCast<Simd<[usize; 2]>> for msizex2[src]

impl Not for msizex2[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[msize; 2]>> for msizex2[src]

impl Simd for msizex2[src]

type Element = msize

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+
\ No newline at end of file diff --git a/packed_simd/type.msizex4.html b/packed_simd/type.msizex4.html new file mode 100644 index 000000000..679b0338a --- /dev/null +++ b/packed_simd/type.msizex4.html @@ -0,0 +1,101 @@
packed_simd::msizex4 - Rust

[][src]Type Definition packed_simd::msizex4

type msizex4 = Simd<[msize; 4]>;

A vector mask with 4 msize lanes.

+

Implementations

impl msizex4[src]

pub const fn new(x0: bool, x1: bool, x2: bool, x3: bool) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl msizex4[src]

pub fn and(self) -> bool[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> bool[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> bool[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl msizex4[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?

+

pub fn any(self) -> bool[src]

Is any vector lane true?

+

pub fn none(self) -> bool[src]

Are all vector lanes false?

+

impl msizex4[src]

pub fn eq(self, other: Self) -> msizex4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex4[src]

Lane-wise greater-than-or-equals comparison.

+

impl msizex4[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[msize; 4] as SimdArray>::NT>, 
[src]

Selects elements of a and b using mask.

+

The lanes of the result for which the mask is true contain +the values of a. The remaining lanes contain the values of +b.

+

impl msizex4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<msizex4>[src]

Returns a wrapper that implements PartialOrd.

+

impl msizex4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<msizex4>[src]

Returns a wrapper that implements Ord.

+

impl msizex4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl msizex4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has less than 8 lanes, the bits that do not +correspond to any vector lanes are cleared.

+

Trait Implementations

impl BitAnd<Simd<[msize; 4]>> for msizex4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<bool> for msizex4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[msize; 4]>> for msizex4[src]

impl BitAndAssign<bool> for msizex4[src]

impl BitOr<Simd<[msize; 4]>> for msizex4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<bool> for msizex4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[msize; 4]>> for msizex4[src]

impl BitOrAssign<bool> for msizex4[src]

impl BitXor<Simd<[msize; 4]>> for msizex4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<bool> for msizex4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[msize; 4]>> for msizex4[src]

impl BitXorAssign<bool> for msizex4[src]

impl Debug for msizex4[src]

impl Default for msizex4[src]

impl Eq for msizex4[src]

impl From<[msize; 4]> for msizex4[src]

impl FromCast<Simd<[f32; 4]>> for msizex4[src]

impl FromCast<Simd<[f64; 4]>> for msizex4[src]

impl FromCast<Simd<[i128; 4]>> for msizex4[src]

impl FromCast<Simd<[i16; 4]>> for msizex4[src]

impl FromCast<Simd<[i32; 4]>> for msizex4[src]

impl FromCast<Simd<[i64; 4]>> for msizex4[src]

impl FromCast<Simd<[i8; 4]>> for msizex4[src]

impl FromCast<Simd<[isize; 4]>> for msizex4[src]

impl FromCast<Simd<[m128; 4]>> for msizex4[src]

impl FromCast<Simd<[m16; 4]>> for msizex4[src]

impl FromCast<Simd<[m32; 4]>> for msizex4[src]

impl FromCast<Simd<[m64; 4]>> for msizex4[src]

impl FromCast<Simd<[m8; 4]>> for msizex4[src]

impl FromCast<Simd<[u128; 4]>> for msizex4[src]

impl FromCast<Simd<[u16; 4]>> for msizex4[src]

impl FromCast<Simd<[u32; 4]>> for msizex4[src]

impl FromCast<Simd<[u64; 4]>> for msizex4[src]

impl FromCast<Simd<[u8; 4]>> for msizex4[src]

impl FromCast<Simd<[usize; 4]>> for msizex4[src]

impl Not for msizex4[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl PartialEq<Simd<[msize; 4]>> for msizex4[src]

impl Simd for msizex4[src]

type Element = msize

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+
\ No newline at end of file diff --git a/packed_simd/type.msizex8.html b/packed_simd/type.msizex8.html new file mode 100644 index 000000000..82845f24c --- /dev/null +++ b/packed_simd/type.msizex8.html @@ -0,0 +1,98 @@
packed_simd::msizex8 - Rust

[][src]Type Definition packed_simd::msizex8

type msizex8 = Simd<[msize; 8]>;

A vector mask with 8 msize lanes.

+

Implementations

impl msizex8[src]

pub const fn new(
    x0: bool,
    x1: bool,
    x2: bool,
    x3: bool,
    x4: bool,
    x5: bool,
    x6: bool,
    x7: bool
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: bool) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> bool[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> bool[src]

Extracts the value at index.

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: bool) -> Self[src]

Returns a new vector where the value at index is replaced by +new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl msizex8[src]

pub fn and(self) -> bool[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> bool[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> bool[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl msizex8[src]

pub fn all(self) -> bool[src]

Are all vector lanes true?


pub fn any(self) -> bool[src]

Is any vector lane true?


pub fn none(self) -> bool[src]

Are all vector lanes false?

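Taken together, these three reductions are easy to misread; a minimal sketch of their relationship (assuming a nightly toolchain with the packed_simd crate as a dependency):

use packed_simd::msizex8;

fn main() {
    // Lane 2 is the only true lane.
    let m = msizex8::new(false, false, true, false,
                         false, false, false, false);

    assert!(m.any());    // at least one lane is true
    assert!(!m.all());   // not every lane is true
    assert!(!m.none());  // none() is true exactly when any() is false
}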

impl msizex8[src]

pub fn eq(self, other: Self) -> msizex8[src]

Lane-wise equality comparison.


pub fn ne(self, other: Self) -> msizex8[src]

Lane-wise inequality comparison.


pub fn lt(self, other: Self) -> msizex8[src]

Lane-wise less-than comparison.


pub fn le(self, other: Self) -> msizex8[src]

Lane-wise less-than-or-equals comparison.


pub fn gt(self, other: Self) -> msizex8[src]

Lane-wise greater-than comparison.


pub fn ge(self, other: Self) -> msizex8[src]

Lane-wise greater-than-or-equals comparison.


impl msizex8[src]

pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T> where
    T: SimdArray<NT = <[msize; 8] as SimdArray>::NT>,
[src]

Selects elements of a and b using mask.

The lanes of the result for which the mask is true contain the values of a. The remaining lanes contain the values of b.
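A minimal sketch of mask-driven selection, assuming a nightly toolchain with packed_simd as a dependency:

use packed_simd::{isizex8, msizex8};

fn main() {
    let mask = msizex8::new(true, false, true, false,
                            true, false, true, false);
    let a = isizex8::splat(1);
    let b = isizex8::splat(-1);

    // Lanes where the mask is true come from a, the rest from b.
    let r = mask.select(a, b);
    assert_eq!(r.extract(0), 1);
    assert_eq!(r.extract(1), -1);
}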

impl msizex8[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<msizex8>[src]

Returns a wrapper that implements PartialOrd.

impl msizex8[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<msizex8>[src]

Returns a wrapper that implements Ord.

impl msizex8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl msizex8[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.
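For illustration, a small sketch of the lane-to-bit mapping (assuming, as in packed_simd, that lane i maps to bit i of the mask; nightly toolchain and the crate as a dependency assumed):

use packed_simd::msizex8;

fn main() {
    let m = msizex8::new(true, false, false, false,
                         false, false, false, true);

    // Lanes 0 and 7 are true, so bits 0 and 7 are set.
    assert_eq!(m.bitmask(), 0b1000_0001u8);
}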

Trait Implementations

impl BitAnd<Simd<[msize; 8]>> for msizex8[src]

type Output = Self

The resulting type after applying the & operator.


impl BitAnd<bool> for msizex8[src]

type Output = Self

The resulting type after applying the & operator.


impl BitAndAssign<Simd<[msize; 8]>> for msizex8[src]

impl BitAndAssign<bool> for msizex8[src]

impl BitOr<Simd<[msize; 8]>> for msizex8[src]

type Output = Self

The resulting type after applying the | operator.


impl BitOr<bool> for msizex8[src]

type Output = Self

The resulting type after applying the | operator.


impl BitOrAssign<Simd<[msize; 8]>> for msizex8[src]

impl BitOrAssign<bool> for msizex8[src]

impl BitXor<Simd<[msize; 8]>> for msizex8[src]

type Output = Self

The resulting type after applying the ^ operator.


impl BitXor<bool> for msizex8[src]

type Output = Self

The resulting type after applying the ^ operator.


impl BitXorAssign<Simd<[msize; 8]>> for msizex8[src]

impl BitXorAssign<bool> for msizex8[src]

impl Debug for msizex8[src]

impl Default for msizex8[src]

impl Eq for msizex8[src]

impl From<[msize; 8]> for msizex8[src]

impl FromCast<Simd<[f32; 8]>> for msizex8[src]

impl FromCast<Simd<[f64; 8]>> for msizex8[src]

impl FromCast<Simd<[i16; 8]>> for msizex8[src]

impl FromCast<Simd<[i32; 8]>> for msizex8[src]

impl FromCast<Simd<[i64; 8]>> for msizex8[src]

impl FromCast<Simd<[i8; 8]>> for msizex8[src]

impl FromCast<Simd<[isize; 8]>> for msizex8[src]

impl FromCast<Simd<[m16; 8]>> for msizex8[src]

impl FromCast<Simd<[m32; 8]>> for msizex8[src]

impl FromCast<Simd<[m64; 8]>> for msizex8[src]

impl FromCast<Simd<[m8; 8]>> for msizex8[src]

impl FromCast<Simd<[u16; 8]>> for msizex8[src]

impl FromCast<Simd<[u32; 8]>> for msizex8[src]

impl FromCast<Simd<[u64; 8]>> for msizex8[src]

impl FromCast<Simd<[u8; 8]>> for msizex8[src]

impl FromCast<Simd<[usize; 8]>> for msizex8[src]

impl Not for msizex8[src]

type Output = Self

The resulting type after applying the ! operator.


impl PartialEq<Simd<[msize; 8]>> for msizex8[src]

impl Simd for msizex8[src]

type Element = msize

Element type of the SIMD vector


type LanesType = [u32; 8]

The type: [u32; Self::N].

packed_simd::u128x1 - Rust

Type Definition packed_simd::u128x1

type u128x1 = Simd<[u128; 1]>;

A 128-bit vector with 1 u128 lane.

Implementations

impl u128x1[src]

pub const fn new(x0: u128) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u128) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u128[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u128[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.
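Since replace is easy to misread as an in-place mutation, a short sketch of its value semantics (nightly toolchain with packed_simd as a dependency assumed):

use packed_simd::u128x1;

fn main() {
    let v = u128x1::new(42);

    // replace is pure: it returns a new vector and leaves v intact,
    // which is why ignoring the result triggers the must_use warning.
    let w = v.replace(0, 7);
    assert_eq!(v.extract(0), 42);
    assert_eq!(w.extract(0), 7);
}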

impl u128x1[src]

pub fn rotate_left(self, n: u128x1) -> u128x1[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u128x1) -> u128x1[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
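To make the distinction from << concrete, a small sketch (nightly toolchain with packed_simd as a dependency assumed):

use packed_simd::u128x1;

fn main() {
    let x = u128x1::new(1u128 << 127); // only the top bit set

    // A plain shift discards the top bit entirely...
    assert_eq!((1u128 << 127) << 1, 0);

    // ...while rotate_left wraps it around to bit 0 of the same lane.
    let r = x.rotate_left(u128x1::new(1));
    assert_eq!(r.extract(0), 1);
}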

impl u128x1[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl u128x1[src]

pub fn wrapping_sum(self) -> u128[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> u128[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl u128x1[src]

pub fn max_element(self) -> u128[src]

Largest vector element value.

pub fn min_element(self) -> u128[src]

Smallest vector element value.

impl u128x1[src]

pub fn and(self) -> u128[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u128[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u128[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl u128x1[src]

pub fn from_slice_aligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl u128x1[src]

pub fn write_to_slice_aligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl u128x1[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.
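A short sketch of the byte-order helpers (nightly toolchain with packed_simd as a dependency assumed; the round-trip assertion holds on either endianness):

use packed_simd::u128x1;

fn main() {
    let x = u128x1::new(0x0102);

    // swap_bytes unconditionally reverses the byte order of each lane:
    // the two low bytes 0x02, 0x01 become the two high bytes.
    assert_eq!(x.swap_bytes().extract(0), 0x0201u128 << 112);

    // to_le/from_le are no-ops on little-endian targets and byte swaps
    // on big-endian ones; round-tripping always restores the value.
    assert_eq!(u128x1::from_le(x.to_le()), x);
}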

impl u128x1[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl u128x1[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl u128x1[src]

pub fn eq(self, other: Self) -> m128x1[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m128x1[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m128x1[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m128x1[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m128x1[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m128x1[src]

Lane-wise greater-than-or-equals comparison.

impl u128x1[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u128x1>[src]

Returns a wrapper that implements PartialOrd.

impl u128x1[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u128x1>[src]

Returns a wrapper that implements Ord.

impl u128x1[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

Trait Implementations

impl Add<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the + operator.


impl Add<u128> for u128x1[src]

type Output = Self

The resulting type after applying the + operator.


impl AddAssign<Simd<[u128; 1]>> for u128x1[src]

impl AddAssign<u128> for u128x1[src]

impl Binary for u128x1[src]

impl BitAnd<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the & operator.


impl BitAnd<u128> for u128x1[src]

type Output = Self

The resulting type after applying the & operator.


impl BitAndAssign<Simd<[u128; 1]>> for u128x1[src]

impl BitAndAssign<u128> for u128x1[src]

impl BitOr<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the | operator.


impl BitOr<u128> for u128x1[src]

type Output = Self

The resulting type after applying the | operator.


impl BitOrAssign<Simd<[u128; 1]>> for u128x1[src]

impl BitOrAssign<u128> for u128x1[src]

impl BitXor<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the ^ operator.


impl BitXor<u128> for u128x1[src]

type Output = Self

The resulting type after applying the ^ operator.


impl BitXorAssign<Simd<[u128; 1]>> for u128x1[src]

impl BitXorAssign<u128> for u128x1[src]

impl Debug for u128x1[src]

impl Default for u128x1[src]

impl Div<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the / operator.


impl Div<u128> for u128x1[src]

type Output = Self

The resulting type after applying the / operator.


impl DivAssign<Simd<[u128; 1]>> for u128x1[src]

impl DivAssign<u128> for u128x1[src]

impl Eq for u128x1[src]

impl From<[u128; 1]> for u128x1[src]

impl FromBits<Simd<[f32; 4]>> for u128x1[src]

impl FromBits<Simd<[f64; 2]>> for u128x1[src]

impl FromBits<Simd<[i128; 1]>> for u128x1[src]

impl FromBits<Simd<[i16; 8]>> for u128x1[src]

impl FromBits<Simd<[i32; 4]>> for u128x1[src]

impl FromBits<Simd<[i64; 2]>> for u128x1[src]

impl FromBits<Simd<[i8; 16]>> for u128x1[src]

impl FromBits<Simd<[m128; 1]>> for u128x1[src]

impl FromBits<Simd<[m16; 8]>> for u128x1[src]

impl FromBits<Simd<[m32; 4]>> for u128x1[src]

impl FromBits<Simd<[m64; 2]>> for u128x1[src]

impl FromBits<Simd<[m8; 16]>> for u128x1[src]

impl FromBits<Simd<[u16; 8]>> for u128x1[src]

impl FromBits<Simd<[u32; 4]>> for u128x1[src]

impl FromBits<Simd<[u64; 2]>> for u128x1[src]

impl FromBits<Simd<[u8; 16]>> for u128x1[src]

impl FromBits<__m128> for u128x1[src]

impl FromBits<__m128d> for u128x1[src]

impl FromBits<__m128i> for u128x1[src]

impl FromCast<Simd<[i128; 1]>> for u128x1[src]

impl FromCast<Simd<[m128; 1]>> for u128x1[src]

impl Hash for u128x1[src]

impl LowerHex for u128x1[src]

impl Mul<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the * operator.


impl Mul<u128> for u128x1[src]

type Output = Self

The resulting type after applying the * operator.


impl MulAssign<Simd<[u128; 1]>> for u128x1[src]

impl MulAssign<u128> for u128x1[src]

impl Not for u128x1[src]

type Output = Self

The resulting type after applying the ! operator.


impl Octal for u128x1[src]

impl PartialEq<Simd<[u128; 1]>> for u128x1[src]

impl<'a> Product<&'a Simd<[u128; 1]>> for u128x1[src]

impl Product<Simd<[u128; 1]>> for u128x1[src]

impl Rem<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the % operator.


impl Rem<u128> for u128x1[src]

type Output = Self

The resulting type after applying the % operator.


impl RemAssign<Simd<[u128; 1]>> for u128x1[src]

impl RemAssign<u128> for u128x1[src]

impl Shl<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the << operator.


impl Shl<u32> for u128x1[src]

type Output = Self

The resulting type after applying the << operator.


impl ShlAssign<Simd<[u128; 1]>> for u128x1[src]

impl ShlAssign<u32> for u128x1[src]

impl Shr<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the >> operator.


impl Shr<u32> for u128x1[src]

type Output = Self

The resulting type after applying the >> operator.


impl ShrAssign<Simd<[u128; 1]>> for u128x1[src]

impl ShrAssign<u32> for u128x1[src]

impl Simd for u128x1[src]

type Element = u128

Element type of the SIMD vector


type LanesType = [u32; 1]

The type: [u32; Self::N].


impl Sub<Simd<[u128; 1]>> for u128x1[src]

type Output = Self

The resulting type after applying the - operator.


impl Sub<u128> for u128x1[src]

type Output = Self

The resulting type after applying the - operator.


impl SubAssign<Simd<[u128; 1]>> for u128x1[src]

impl SubAssign<u128> for u128x1[src]

impl<'a> Sum<&'a Simd<[u128; 1]>> for u128x1[src]

impl Sum<Simd<[u128; 1]>> for u128x1[src]

impl UpperHex for u128x1[src]

packed_simd::u128x2 - Rust

Type Definition packed_simd::u128x2

type u128x2 = Simd<[u128; 2]>;

A 256-bit vector with 2 u128 lanes.

Implementations

impl u128x2[src]

pub const fn new(x0: u128, x1: u128) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u128) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u128[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u128[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl u128x2[src]

pub fn rotate_left(self, n: u128x2) -> u128x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u128x2) -> u128x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl u128x2[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl u128x2[src]

pub fn wrapping_sum(self) -> u128[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> u128[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^n where n is the number of times it overflows.
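A minimal sketch of the wrapping behavior (nightly toolchain with packed_simd as a dependency assumed):

use packed_simd::u128x2;

fn main() {
    let v = u128x2::new(u128::max_value(), 2);

    // (2^128 - 1) + 2 overflows once, so the result is the
    // mathematical sum modulo 2^128, i.e. 1.
    assert_eq!(v.wrapping_sum(), 1);
}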

impl u128x2[src]

pub fn max_element(self) -> u128[src]

Largest vector element value.

pub fn min_element(self) -> u128[src]

Smallest vector element value.

impl u128x2[src]

pub fn and(self) -> u128[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u128[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u128[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl u128x2[src]

pub fn from_slice_aligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl u128x2[src]

pub fn write_to_slice_aligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.
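A short sketch of the slice round-trip using the unaligned variants, which carry no alignment requirement (nightly toolchain with packed_simd as a dependency assumed):

use packed_simd::u128x2;

fn main() {
    let data: [u128; 2] = [10, 20];

    // The unaligned variants only require slice.len() >= lanes();
    // the *_aligned variants additionally require that the slice
    // start on an align_of::<u128x2>() boundary.
    let v = u128x2::from_slice_unaligned(&data);

    let mut out = [0u128; 2];
    v.write_to_slice_unaligned(&mut out);
    assert_eq!(out, data);
}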

impl u128x2[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl u128x2[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl u128x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl u128x2[src]

pub fn eq(self, other: Self) -> m128x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m128x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m128x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m128x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m128x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m128x2[src]

Lane-wise greater-than-or-equals comparison.

impl u128x2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u128x2>[src]

Returns a wrapper that implements PartialOrd.

impl u128x2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u128x2>[src]

Returns a wrapper that implements Ord.

impl u128x2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

Trait Implementations

impl Add<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the + operator.


impl Add<u128> for u128x2[src]

type Output = Self

The resulting type after applying the + operator.


impl AddAssign<Simd<[u128; 2]>> for u128x2[src]

impl AddAssign<u128> for u128x2[src]

impl Binary for u128x2[src]

impl BitAnd<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the & operator.


impl BitAnd<u128> for u128x2[src]

type Output = Self

The resulting type after applying the & operator.


impl BitAndAssign<Simd<[u128; 2]>> for u128x2[src]

impl BitAndAssign<u128> for u128x2[src]

impl BitOr<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the | operator.


impl BitOr<u128> for u128x2[src]

type Output = Self

The resulting type after applying the | operator.


impl BitOrAssign<Simd<[u128; 2]>> for u128x2[src]

impl BitOrAssign<u128> for u128x2[src]

impl BitXor<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the ^ operator.


impl BitXor<u128> for u128x2[src]

type Output = Self

The resulting type after applying the ^ operator.


impl BitXorAssign<Simd<[u128; 2]>> for u128x2[src]

impl BitXorAssign<u128> for u128x2[src]

impl Debug for u128x2[src]

impl Default for u128x2[src]

impl Div<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the / operator.


impl Div<u128> for u128x2[src]

type Output = Self

The resulting type after applying the / operator.


impl DivAssign<Simd<[u128; 2]>> for u128x2[src]

impl DivAssign<u128> for u128x2[src]

impl Eq for u128x2[src]

impl From<[u128; 2]> for u128x2[src]

impl From<Simd<[u16; 2]>> for u128x2[src]

impl From<Simd<[u32; 2]>> for u128x2[src]

impl From<Simd<[u64; 2]>> for u128x2[src]

impl From<Simd<[u8; 2]>> for u128x2[src]

impl FromBits<Simd<[f32; 8]>> for u128x2[src]

impl FromBits<Simd<[f64; 4]>> for u128x2[src]

impl FromBits<Simd<[i128; 2]>> for u128x2[src]

impl FromBits<Simd<[i16; 16]>> for u128x2[src]

impl FromBits<Simd<[i32; 8]>> for u128x2[src]

impl FromBits<Simd<[i64; 4]>> for u128x2[src]

impl FromBits<Simd<[i8; 32]>> for u128x2[src]

impl FromBits<Simd<[m128; 2]>> for u128x2[src]

impl FromBits<Simd<[m16; 16]>> for u128x2[src]

impl FromBits<Simd<[m32; 8]>> for u128x2[src]

impl FromBits<Simd<[m64; 4]>> for u128x2[src]

impl FromBits<Simd<[m8; 32]>> for u128x2[src]

impl FromBits<Simd<[u16; 16]>> for u128x2[src]

impl FromBits<Simd<[u32; 8]>> for u128x2[src]

impl FromBits<Simd<[u64; 4]>> for u128x2[src]

impl FromBits<Simd<[u8; 32]>> for u128x2[src]

impl FromBits<__m256> for u128x2[src]

impl FromBits<__m256d> for u128x2[src]

impl FromBits<__m256i> for u128x2[src]

impl FromCast<Simd<[f32; 2]>> for u128x2[src]

impl FromCast<Simd<[f64; 2]>> for u128x2[src]

impl FromCast<Simd<[i128; 2]>> for u128x2[src]

impl FromCast<Simd<[i16; 2]>> for u128x2[src]

impl FromCast<Simd<[i32; 2]>> for u128x2[src]

impl FromCast<Simd<[i64; 2]>> for u128x2[src]

impl FromCast<Simd<[i8; 2]>> for u128x2[src]

impl FromCast<Simd<[isize; 2]>> for u128x2[src]

impl FromCast<Simd<[m128; 2]>> for u128x2[src]

impl FromCast<Simd<[m16; 2]>> for u128x2[src]

impl FromCast<Simd<[m32; 2]>> for u128x2[src]

impl FromCast<Simd<[m64; 2]>> for u128x2[src]

impl FromCast<Simd<[m8; 2]>> for u128x2[src]

impl FromCast<Simd<[msize; 2]>> for u128x2[src]

impl FromCast<Simd<[u16; 2]>> for u128x2[src]

impl FromCast<Simd<[u32; 2]>> for u128x2[src]

impl FromCast<Simd<[u64; 2]>> for u128x2[src]

impl FromCast<Simd<[u8; 2]>> for u128x2[src]

impl FromCast<Simd<[usize; 2]>> for u128x2[src]

impl Hash for u128x2[src]

impl LowerHex for u128x2[src]

impl Mul<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the * operator.


impl Mul<u128> for u128x2[src]

type Output = Self

The resulting type after applying the * operator.


impl MulAssign<Simd<[u128; 2]>> for u128x2[src]

impl MulAssign<u128> for u128x2[src]

impl Not for u128x2[src]

type Output = Self

The resulting type after applying the ! operator.


impl Octal for u128x2[src]

impl PartialEq<Simd<[u128; 2]>> for u128x2[src]

impl<'a> Product<&'a Simd<[u128; 2]>> for u128x2[src]

impl Product<Simd<[u128; 2]>> for u128x2[src]

impl Rem<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the % operator.


impl Rem<u128> for u128x2[src]

type Output = Self

The resulting type after applying the % operator.


impl RemAssign<Simd<[u128; 2]>> for u128x2[src]

impl RemAssign<u128> for u128x2[src]

impl Shl<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the << operator.


impl Shl<u32> for u128x2[src]

type Output = Self

The resulting type after applying the << operator.


impl ShlAssign<Simd<[u128; 2]>> for u128x2[src]

impl ShlAssign<u32> for u128x2[src]

impl Shr<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the >> operator.


impl Shr<u32> for u128x2[src]

type Output = Self

The resulting type after applying the >> operator.


impl ShrAssign<Simd<[u128; 2]>> for u128x2[src]

impl ShrAssign<u32> for u128x2[src]

impl Simd for u128x2[src]

type Element = u128

Element type of the SIMD vector


type LanesType = [u32; 2]

The type: [u32; Self::N].


impl Sub<Simd<[u128; 2]>> for u128x2[src]

type Output = Self

The resulting type after applying the - operator.


impl Sub<u128> for u128x2[src]

type Output = Self

The resulting type after applying the - operator.


impl SubAssign<Simd<[u128; 2]>> for u128x2[src]

impl SubAssign<u128> for u128x2[src]

impl<'a> Sum<&'a Simd<[u128; 2]>> for u128x2[src]

impl Sum<Simd<[u128; 2]>> for u128x2[src]

impl UpperHex for u128x2[src]

packed_simd::u128x4 - Rust

Type Definition packed_simd::u128x4

type u128x4 = Simd<[u128; 4]>;

A 512-bit vector with 4 u128 lanes.

Implementations

impl u128x4[src]

pub const fn new(x0: u128, x1: u128, x2: u128, x3: u128) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u128) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u128[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u128[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u128) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl u128x4[src]

pub fn rotate_left(self, n: u128x4) -> u128x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u128x4) -> u128x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl u128x4[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl u128x4[src]

pub fn wrapping_sum(self) -> u128[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> u128[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl u128x4[src]

pub fn max_element(self) -> u128[src]

Largest vector element value.

pub fn min_element(self) -> u128[src]

Smallest vector element value.

impl u128x4[src]

pub fn and(self) -> u128[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u128[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u128[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.
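A small sketch contrasting the lane-wise min with the horizontal element reductions (nightly toolchain with packed_simd as a dependency assumed):

use packed_simd::u128x4;

fn main() {
    let a = u128x4::new(1, 5, 3, 7);
    let b = u128x4::new(2, 4, 6, 0);

    // min is lane-wise and returns a vector; max_element and
    // min_element are horizontal reductions over a single vector.
    let m = a.min(b);
    assert_eq!(m, u128x4::new(1, 4, 3, 0));
    assert_eq!(m.max_element(), 4);
    assert_eq!(m.min_element(), 0);
}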

impl u128x4[src]

pub fn from_slice_aligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u128]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl u128x4[src]

pub fn write_to_slice_aligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u128])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl u128x4[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl u128x4[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.
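A short sketch of the per-lane bit counts (nightly toolchain with packed_simd as a dependency assumed):

use packed_simd::u128x4;

fn main() {
    let v = u128x4::new(0, 1, 0b1011, u128::max_value());

    // Each lane is counted independently, so the result is again a vector.
    assert_eq!(v.count_ones(), u128x4::new(0, 1, 3, 128));
    assert_eq!(v.leading_zeros(), u128x4::new(128, 127, 124, 0));
}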

impl u128x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl u128x4[src]

pub fn eq(self, other: Self) -> m128x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m128x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m128x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m128x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m128x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m128x4[src]

Lane-wise greater-than-or-equals comparison.

impl u128x4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u128x4>[src]

Returns a wrapper that implements PartialOrd.

impl u128x4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u128x4>[src]

Returns a wrapper that implements Ord.

impl u128x4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

Trait Implementations

impl Add<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the + operator.


impl Add<u128> for u128x4[src]

type Output = Self

The resulting type after applying the + operator.


impl AddAssign<Simd<[u128; 4]>> for u128x4[src]

impl AddAssign<u128> for u128x4[src]

impl Binary for u128x4[src]

impl BitAnd<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the & operator.


impl BitAnd<u128> for u128x4[src]

type Output = Self

The resulting type after applying the & operator.


impl BitAndAssign<Simd<[u128; 4]>> for u128x4[src]

impl BitAndAssign<u128> for u128x4[src]

impl BitOr<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the | operator.


impl BitOr<u128> for u128x4[src]

type Output = Self

The resulting type after applying the | operator.


impl BitOrAssign<Simd<[u128; 4]>> for u128x4[src]

impl BitOrAssign<u128> for u128x4[src]

impl BitXor<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the ^ operator.


impl BitXor<u128> for u128x4[src]

type Output = Self

The resulting type after applying the ^ operator.


impl BitXorAssign<Simd<[u128; 4]>> for u128x4[src]

impl BitXorAssign<u128> for u128x4[src]

impl Debug for u128x4[src]

impl Default for u128x4[src]

impl Div<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the / operator.


impl Div<u128> for u128x4[src]

type Output = Self

The resulting type after applying the / operator.


impl DivAssign<Simd<[u128; 4]>> for u128x4[src]

impl DivAssign<u128> for u128x4[src]

impl Eq for u128x4[src]

impl From<[u128; 4]> for u128x4[src]

impl From<Simd<[u16; 4]>> for u128x4[src]

impl From<Simd<[u32; 4]>> for u128x4[src]

impl From<Simd<[u64; 4]>> for u128x4[src]

impl From<Simd<[u8; 4]>> for u128x4[src]

impl FromBits<Simd<[f32; 16]>> for u128x4[src]

impl FromBits<Simd<[f64; 8]>> for u128x4[src]

impl FromBits<Simd<[i128; 4]>> for u128x4[src]

impl FromBits<Simd<[i16; 32]>> for u128x4[src]

impl FromBits<Simd<[i32; 16]>> for u128x4[src]

impl FromBits<Simd<[i64; 8]>> for u128x4[src]

impl FromBits<Simd<[i8; 64]>> for u128x4[src]

impl FromBits<Simd<[m128; 4]>> for u128x4[src]

impl FromBits<Simd<[m16; 32]>> for u128x4[src]

impl FromBits<Simd<[m32; 16]>> for u128x4[src]

impl FromBits<Simd<[m64; 8]>> for u128x4[src]

impl FromBits<Simd<[m8; 64]>> for u128x4[src]

impl FromBits<Simd<[u16; 32]>> for u128x4[src]

impl FromBits<Simd<[u32; 16]>> for u128x4[src]

impl FromBits<Simd<[u64; 8]>> for u128x4[src]

impl FromBits<Simd<[u8; 64]>> for u128x4[src]

impl FromCast<Simd<[f32; 4]>> for u128x4[src]

impl FromCast<Simd<[f64; 4]>> for u128x4[src]

impl FromCast<Simd<[i128; 4]>> for u128x4[src]

impl FromCast<Simd<[i16; 4]>> for u128x4[src]

impl FromCast<Simd<[i32; 4]>> for u128x4[src]

impl FromCast<Simd<[i64; 4]>> for u128x4[src]

impl FromCast<Simd<[i8; 4]>> for u128x4[src]

impl FromCast<Simd<[isize; 4]>> for u128x4[src]

impl FromCast<Simd<[m128; 4]>> for u128x4[src]

impl FromCast<Simd<[m16; 4]>> for u128x4[src]

impl FromCast<Simd<[m32; 4]>> for u128x4[src]

impl FromCast<Simd<[m64; 4]>> for u128x4[src]

impl FromCast<Simd<[m8; 4]>> for u128x4[src]

impl FromCast<Simd<[msize; 4]>> for u128x4[src]

impl FromCast<Simd<[u16; 4]>> for u128x4[src]

impl FromCast<Simd<[u32; 4]>> for u128x4[src]

impl FromCast<Simd<[u64; 4]>> for u128x4[src]

impl FromCast<Simd<[u8; 4]>> for u128x4[src]

impl FromCast<Simd<[usize; 4]>> for u128x4[src]

impl Hash for u128x4[src]

impl LowerHex for u128x4[src]

impl Mul<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the * operator.


impl Mul<u128> for u128x4[src]

type Output = Self

The resulting type after applying the * operator.


impl MulAssign<Simd<[u128; 4]>> for u128x4[src]

impl MulAssign<u128> for u128x4[src]

impl Not for u128x4[src]

type Output = Self

The resulting type after applying the ! operator.


impl Octal for u128x4[src]

impl PartialEq<Simd<[u128; 4]>> for u128x4[src]

impl<'a> Product<&'a Simd<[u128; 4]>> for u128x4[src]

impl Product<Simd<[u128; 4]>> for u128x4[src]

impl Rem<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the % operator.


impl Rem<u128> for u128x4[src]

type Output = Self

The resulting type after applying the % operator.


impl RemAssign<Simd<[u128; 4]>> for u128x4[src]

impl RemAssign<u128> for u128x4[src]

impl Shl<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the << operator.


impl Shl<u32> for u128x4[src]

type Output = Self

The resulting type after applying the << operator.


impl ShlAssign<Simd<[u128; 4]>> for u128x4[src]

impl ShlAssign<u32> for u128x4[src]

impl Shr<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the >> operator.


impl Shr<u32> for u128x4[src]

type Output = Self

The resulting type after applying the >> operator.


impl ShrAssign<Simd<[u128; 4]>> for u128x4[src]

impl ShrAssign<u32> for u128x4[src]

impl Simd for u128x4[src]

type Element = u128

Element type of the SIMD vector


type LanesType = [u32; 4]

The type: [u32; Self::N].


impl Sub<Simd<[u128; 4]>> for u128x4[src]

type Output = Self

The resulting type after applying the - operator.


impl Sub<u128> for u128x4[src]

type Output = Self

The resulting type after applying the - operator.


impl SubAssign<Simd<[u128; 4]>> for u128x4[src]

impl SubAssign<u128> for u128x4[src]

impl<'a> Sum<&'a Simd<[u128; 4]>> for u128x4[src]

impl Sum<Simd<[u128; 4]>> for u128x4[src]

impl UpperHex for u128x4[src]

packed_simd::u16x16 - Rust

Type Definition packed_simd::u16x16

type u16x16 = Simd<[u16; 16]>;

A 256-bit vector with 16 u16 lanes.

Implementations

impl u16x16[src]

pub const fn new(
    x0: u16,
    x1: u16,
    x2: u16,
    x3: u16,
    x4: u16,
    x5: u16,
    x6: u16,
    x7: u16,
    x8: u16,
    x9: u16,
    x10: u16,
    x11: u16,
    x12: u16,
    x13: u16,
    x14: u16,
    x15: u16
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u16) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u16[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u16[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - \
              it returns a new vector with the value at `index` \
              replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl u16x16[src]

pub fn rotate_left(self, n: u16x16) -> u16x16[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u16x16) -> u16x16[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl u16x16[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl u16x16[src]

pub fn wrapping_sum(self) -> u16[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, it returns the mathematical result modulo 2^n where n is the number of times it overflows.

pub fn wrapping_product(self) -> u16[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, it returns the mathematical result modulo 2^n where n is the number of times it overflows.

impl u16x16[src]

pub fn max_element(self) -> u16[src]

Largest vector element value.

pub fn min_element(self) -> u16[src]

Smallest vector element value.

impl u16x16[src]

pub fn and(self) -> u16[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u16[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u16[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl u16x16[src]

pub fn from_slice_aligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl u16x16[src]

pub fn write_to_slice_aligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl u16x16[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl u16x16[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl u16x16[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

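For illustration, a dynamic lane reversal (nightly toolchain with packed_simd as a dependency assumed, and assuming that the dynamic indices for a u16x16 are themselves a u16x16, as in packed_simd's Shuffle1Dyn implementations):

use packed_simd::u16x16;

fn main() {
    let v = u16x16::new(0, 10, 20, 30, 40, 50, 60, 70,
                        80, 90, 100, 110, 120, 130, 140, 150);

    // Reverse the lanes: lane i of the result takes the value of
    // lane idx[i] of v.
    let idx = u16x16::new(15, 14, 13, 12, 11, 10, 9, 8,
                          7, 6, 5, 4, 3, 2, 1, 0);
    let r = v.shuffle1_dyn(idx);
    assert_eq!(r.extract(0), 150);
    assert_eq!(r.extract(15), 0);
}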

impl u16x16[src]

pub fn eq(self, other: Self) -> m16x16[src]

Lane-wise equality comparison.


pub fn ne(self, other: Self) -> m16x16[src]

Lane-wise inequality comparison.


pub fn lt(self, other: Self) -> m16x16[src]

Lane-wise less-than comparison.


pub fn le(self, other: Self) -> m16x16[src]

Lane-wise less-than-or-equals comparison.


pub fn gt(self, other: Self) -> m16x16[src]

Lane-wise greater-than comparison.


pub fn ge(self, other: Self) -> m16x16[src]

Lane-wise greater-than-or-equals comparison.

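A minimal sketch of a lane-wise comparison feeding the mask APIs (nightly toolchain with packed_simd as a dependency assumed; that m16x16's bitmask returns a u16 with lane i in bit i is also an assumption):

use packed_simd::u16x16;

fn main() {
    let a = u16x16::splat(3);
    let b = u16x16::new(1, 2, 3, 4, 5, 6, 7, 8,
                        9, 10, 11, 12, 13, 14, 15, 16);

    // lt is lane-wise and yields a mask vector (m16x16); only
    // lanes 0 and 1 hold values less than 3.
    let lt = b.lt(a);
    assert!(lt.any());
    assert_eq!(lt.bitmask(), 0b0000_0000_0000_0011u16);
}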

impl u16x16[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u16x16>[src]

Returns a wrapper that implements PartialOrd.

impl u16x16[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u16x16>[src]

Returns a wrapper that implements Ord.

impl u16x16[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

Trait Implementations

impl Add<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the + operator.


impl Add<u16> for u16x16[src]

type Output = Self

The resulting type after applying the + operator.


impl AddAssign<Simd<[u16; 16]>> for u16x16[src]

impl AddAssign<u16> for u16x16[src]

impl Binary for u16x16[src]

impl BitAnd<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the & operator.


impl BitAnd<u16> for u16x16[src]

type Output = Self

The resulting type after applying the & operator.


impl BitAndAssign<Simd<[u16; 16]>> for u16x16[src]

impl BitAndAssign<u16> for u16x16[src]

impl BitOr<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the | operator.


impl BitOr<u16> for u16x16[src]

type Output = Self

The resulting type after applying the | operator.


impl BitOrAssign<Simd<[u16; 16]>> for u16x16[src]

impl BitOrAssign<u16> for u16x16[src]

impl BitXor<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the ^ operator.


impl BitXor<u16> for u16x16[src]

type Output = Self

The resulting type after applying the ^ operator.


impl BitXorAssign<Simd<[u16; 16]>> for u16x16[src]

impl BitXorAssign<u16> for u16x16[src]

impl Debug for u16x16[src]

impl Default for u16x16[src]

impl Div<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the / operator.


impl Div<u16> for u16x16[src]

type Output = Self

The resulting type after applying the / operator.


impl DivAssign<Simd<[u16; 16]>> for u16x16[src]

impl DivAssign<u16> for u16x16[src]

impl Eq for u16x16[src]

impl From<[u16; 16]> for u16x16[src]

impl From<Simd<[u8; 16]>> for u16x16[src]

impl FromBits<Simd<[f32; 8]>> for u16x16[src]

impl FromBits<Simd<[f64; 4]>> for u16x16[src]

impl FromBits<Simd<[i128; 2]>> for u16x16[src]

impl FromBits<Simd<[i16; 16]>> for u16x16[src]

impl FromBits<Simd<[i32; 8]>> for u16x16[src]

impl FromBits<Simd<[i64; 4]>> for u16x16[src]

impl FromBits<Simd<[i8; 32]>> for u16x16[src]

impl FromBits<Simd<[m128; 2]>> for u16x16[src]

impl FromBits<Simd<[m16; 16]>> for u16x16[src]

impl FromBits<Simd<[m32; 8]>> for u16x16[src]

impl FromBits<Simd<[m64; 4]>> for u16x16[src]

impl FromBits<Simd<[m8; 32]>> for u16x16[src]

impl FromBits<Simd<[u128; 2]>> for u16x16[src]

impl FromBits<Simd<[u32; 8]>> for u16x16[src]

impl FromBits<Simd<[u64; 4]>> for u16x16[src]

impl FromBits<Simd<[u8; 32]>> for u16x16[src]

impl FromBits<__m256> for u16x16[src]

impl FromBits<__m256d> for u16x16[src]

impl FromBits<__m256i> for u16x16[src]

impl FromCast<Simd<[f32; 16]>> for u16x16[src]

impl FromCast<Simd<[i16; 16]>> for u16x16[src]

impl FromCast<Simd<[i32; 16]>> for u16x16[src]

impl FromCast<Simd<[i8; 16]>> for u16x16[src]

impl FromCast<Simd<[m16; 16]>> for u16x16[src]

impl FromCast<Simd<[m32; 16]>> for u16x16[src]

impl FromCast<Simd<[m8; 16]>> for u16x16[src]

impl FromCast<Simd<[u32; 16]>> for u16x16[src]

impl FromCast<Simd<[u8; 16]>> for u16x16[src]

impl Hash for u16x16[src]

impl LowerHex for u16x16[src]

impl Mul<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<u16> for u16x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[u16; 16]>> for u16x16[src]

impl MulAssign<u16> for u16x16[src]

impl Not for u16x16[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for u16x16[src]

impl PartialEq<Simd<[u16; 16]>> for u16x16[src]

impl<'a> Product<&'a Simd<[u16; 16]>> for u16x16[src]

impl Product<Simd<[u16; 16]>> for u16x16[src]

impl Rem<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<u16> for u16x16[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[u16; 16]>> for u16x16[src]

impl RemAssign<u16> for u16x16[src]

impl Shl<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for u16x16[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[u16; 16]>> for u16x16[src]

impl ShlAssign<u32> for u16x16[src]

impl Shr<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for u16x16[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[u16; 16]>> for u16x16[src]

impl ShrAssign<u32> for u16x16[src]

impl Simd for u16x16[src]

type Element = u16

Element type of the SIMD vector

+

type LanesType = [u32; 16]

The type: [u32; Self::N].

+

impl Sub<Simd<[u16; 16]>> for u16x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<u16> for u16x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[u16; 16]>> for u16x16[src]

impl SubAssign<u16> for u16x16[src]

impl<'a> Sum<&'a Simd<[u16; 16]>> for u16x16[src]

impl Sum<Simd<[u16; 16]>> for u16x16[src]

impl UpperHex for u16x16[src]

\ No newline at end of file diff --git a/packed_simd/type.u16x2.html b/packed_simd/type.u16x2.html new file mode 100644 index 000000000..0dace6ddc --- /dev/null +++ b/packed_simd/type.u16x2.html @@ -0,0 +1,235 @@ +packed_simd::u16x2 - Rust

Type Definition packed_simd::u16x2

type u16x2 = Simd<[u16; 2]>;

A 32-bit vector with 2 u16 lanes.

+

Implementations

impl u16x2[src]

pub const fn new(x0: u16, x1: u16) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u16) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> u16[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u16[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+
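A minimal sketch of the constructor and element accessors above, assuming packed_simd as a dependency:

use packed_simd::u16x2;

fn main() {
    let v = u16x2::new(1, 2);
    assert_eq!(u16x2::lanes(), 2);
    assert_eq!(v.extract(0), 1);
    // replace() is pure: it returns a new vector and leaves `v` untouched.
    let w = v.replace(1, 7);
    assert_eq!(w.extract(1), 7);
    assert_eq!(v.extract(1), 2);
}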

impl u16x2[src]

pub fn rotate_left(self, n: u16x2) -> u16x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: u16x2) -> u16x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+
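To make the rotate-versus-shift distinction concrete, a sketch assuming packed_simd:

use packed_simd::u16x2;

fn main() {
    let v = u16x2::splat(0b1000_0000_0000_0001);
    // Rotation wraps the bit shifted out of the MSB back into the LSB...
    let r = v.rotate_left(u16x2::splat(1));
    assert_eq!(r.extract(0), 0b0000_0000_0000_0011);
    // ...whereas << simply discards it.
    let s = v << 1u32;
    assert_eq!(s.extract(0), 0b0000_0000_0000_0010);
}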

impl u16x2[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl u16x2[src]

pub fn wrapping_sum(self) -> u16[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps: the value returned is the mathematical result modulo 2^b, where b is the bit width of the element type.

+

pub fn wrapping_product(self) -> u16[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps: the value returned is the mathematical result modulo 2^b, where b is the bit width of the element type.

+
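A small sketch of the wrapping behavior, assuming packed_simd:

use packed_simd::u16x2;

fn main() {
    // 65535 + 2 = 65537, which wraps modulo 2^16 to 1.
    let v = u16x2::new(65_535, 2);
    assert_eq!(v.wrapping_sum(), 1);
}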

impl u16x2[src]

pub fn max_element(self) -> u16[src]

Largest vector element value.

+

pub fn min_element(self) -> u16[src]

Smallest vector element value.

+

impl u16x2[src]

pub fn and(self) -> u16[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u16[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u16[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

+
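A sketch of the horizontal reductions above, assuming packed_simd:

use packed_simd::u16x2;

fn main() {
    let v = u16x2::new(0b1100, 0b1010);
    assert_eq!(v.and(), 0b1000);
    assert_eq!(v.or(), 0b1110);
    assert_eq!(v.xor(), 0b0110);
}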

impl u16x2[src]

pub fn from_slice_aligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u16x2[src]

pub fn write_to_slice_aligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
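A round-trip sketch of the slice API, assuming packed_simd; the unaligned variants only require slice.len() >= Self::lanes():

use packed_simd::u16x2;

fn main() {
    let src = [10u16, 20, 30];
    let v = u16x2::from_slice_unaligned(&src);
    let mut dst = [0u16; 2];
    v.write_to_slice_unaligned(&mut dst);
    assert_eq!(dst, [10, 20]);
}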

impl u16x2[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+
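A sketch of the byte-order helpers, assuming packed_simd:

use packed_simd::u16x2;

fn main() {
    let v = u16x2::splat(0x1234);
    // swap_bytes always reverses the bytes of each lane;
    // to_le/to_be only do so when the target's endianness differs.
    assert_eq!(v.swap_bytes(), u16x2::splat(0x3412));
    #[cfg(target_endian = "little")]
    assert_eq!(v.to_le(), v);
}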

impl u16x2[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl u16x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl u16x2[src]

pub fn eq(self, other: Self) -> m16x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x2[src]

Lane-wise greater-than-or-equals comparison.

+
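A sketch of the mask results, assuming packed_simd (and that mask vectors expose extract and select):

use packed_simd::u16x2;

fn main() {
    let a = u16x2::new(1, 5);
    let b = u16x2::new(2, 5);
    let m = a.lt(b); // m16x2: [true, false]
    assert_eq!(m.extract(0), true);
    assert_eq!(m.extract(1), false);
    // select() blends lane-wise: `a` where the mask is true, else `b`.
    assert_eq!(m.select(a, b), u16x2::new(1, 5));
}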

impl u16x2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u16x2>[src]

Returns a wrapper that implements PartialOrd.

+

impl u16x2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u16x2>[src]

Returns a wrapper that implements Ord.

+
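A sketch of the lexicographic wrappers, assuming packed_simd:

use packed_simd::u16x2;

fn main() {
    // Compared like the arrays [1, 9] and [2, 0]: lane 0 decides first.
    let a = u16x2::new(1, 9);
    let b = u16x2::new(2, 0);
    assert!(a.partial_lex_ord() < b.partial_lex_ord());
    assert!(a.lex_ord() < b.lex_ord());
}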

impl u16x2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<u16> for u16x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[u16; 2]>> for u16x2[src]

impl AddAssign<u16> for u16x2[src]

impl Binary for u16x2[src]

impl BitAnd<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<u16> for u16x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[u16; 2]>> for u16x2[src]

impl BitAndAssign<u16> for u16x2[src]

impl BitOr<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<u16> for u16x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[u16; 2]>> for u16x2[src]

impl BitOrAssign<u16> for u16x2[src]

impl BitXor<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<u16> for u16x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[u16; 2]>> for u16x2[src]

impl BitXorAssign<u16> for u16x2[src]

impl Debug for u16x2[src]

impl Default for u16x2[src]

impl Div<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<u16> for u16x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[u16; 2]>> for u16x2[src]

impl DivAssign<u16> for u16x2[src]

impl Eq for u16x2[src]

impl From<[u16; 2]> for u16x2[src]

impl From<Simd<[u8; 2]>> for u16x2[src]

impl FromBits<Simd<[i16; 2]>> for u16x2[src]

impl FromBits<Simd<[i8; 4]>> for u16x2[src]

impl FromBits<Simd<[m16; 2]>> for u16x2[src]

impl FromBits<Simd<[m8; 4]>> for u16x2[src]

impl FromBits<Simd<[u8; 4]>> for u16x2[src]

impl FromCast<Simd<[f32; 2]>> for u16x2[src]

impl FromCast<Simd<[f64; 2]>> for u16x2[src]

impl FromCast<Simd<[i128; 2]>> for u16x2[src]

impl FromCast<Simd<[i16; 2]>> for u16x2[src]

impl FromCast<Simd<[i32; 2]>> for u16x2[src]

impl FromCast<Simd<[i64; 2]>> for u16x2[src]

impl FromCast<Simd<[i8; 2]>> for u16x2[src]

impl FromCast<Simd<[isize; 2]>> for u16x2[src]

impl FromCast<Simd<[m128; 2]>> for u16x2[src]

impl FromCast<Simd<[m16; 2]>> for u16x2[src]

impl FromCast<Simd<[m32; 2]>> for u16x2[src]

impl FromCast<Simd<[m64; 2]>> for u16x2[src]

impl FromCast<Simd<[m8; 2]>> for u16x2[src]

impl FromCast<Simd<[msize; 2]>> for u16x2[src]

impl FromCast<Simd<[u128; 2]>> for u16x2[src]

impl FromCast<Simd<[u32; 2]>> for u16x2[src]

impl FromCast<Simd<[u64; 2]>> for u16x2[src]

impl FromCast<Simd<[u8; 2]>> for u16x2[src]

impl FromCast<Simd<[usize; 2]>> for u16x2[src]

impl Hash for u16x2[src]

impl LowerHex for u16x2[src]

impl Mul<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<u16> for u16x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[u16; 2]>> for u16x2[src]

impl MulAssign<u16> for u16x2[src]

impl Not for u16x2[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for u16x2[src]

impl PartialEq<Simd<[u16; 2]>> for u16x2[src]

impl<'a> Product<&'a Simd<[u16; 2]>> for u16x2[src]

impl Product<Simd<[u16; 2]>> for u16x2[src]

impl Rem<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<u16> for u16x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[u16; 2]>> for u16x2[src]

impl RemAssign<u16> for u16x2[src]

impl Shl<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for u16x2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[u16; 2]>> for u16x2[src]

impl ShlAssign<u32> for u16x2[src]

impl Shr<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for u16x2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[u16; 2]>> for u16x2[src]

impl ShrAssign<u32> for u16x2[src]

impl Simd for u16x2[src]

type Element = u16

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+

impl Sub<Simd<[u16; 2]>> for u16x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<u16> for u16x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[u16; 2]>> for u16x2[src]

impl SubAssign<u16> for u16x2[src]

impl<'a> Sum<&'a Simd<[u16; 2]>> for u16x2[src]

impl Sum<Simd<[u16; 2]>> for u16x2[src]

impl UpperHex for u16x2[src]

\ No newline at end of file diff --git a/packed_simd/type.u16x32.html b/packed_simd/type.u16x32.html new file mode 100644 index 000000000..342d27fe7 --- /dev/null +++ b/packed_simd/type.u16x32.html @@ -0,0 +1,232 @@ +packed_simd::u16x32 - Rust

Type Definition packed_simd::u16x32

type u16x32 = Simd<[u16; 32]>;

A 512-bit vector with 32 u16 lanes.

+

Implementations

impl u16x32[src]

pub const fn new(
    x0: u16,
    x1: u16,
    x2: u16,
    x3: u16,
    x4: u16,
    x5: u16,
    x6: u16,
    x7: u16,
    x8: u16,
    x9: u16,
    x10: u16,
    x11: u16,
    x12: u16,
    x13: u16,
    x14: u16,
    x15: u16,
    x16: u16,
    x17: u16,
    x18: u16,
    x19: u16,
    x20: u16,
    x21: u16,
    x22: u16,
    x23: u16,
    x24: u16,
    x25: u16,
    x26: u16,
    x27: u16,
    x28: u16,
    x29: u16,
    x30: u16,
    x31: u16
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u16) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> u16[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u16[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl u16x32[src]

pub fn rotate_left(self, n: u16x32) -> u16x32[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: u16x32) -> u16x32[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl u16x32[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

+
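A sketch, assuming packed_simd; note these are lane-wise, not horizontal, operations:

use packed_simd::u16x32;

fn main() {
    let a = u16x32::splat(3);
    let b = u16x32::splat(7);
    assert_eq!(a.min(b), u16x32::splat(3));
    assert_eq!(a.max(b), u16x32::splat(7));
}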

impl u16x32[src]

pub fn wrapping_sum(self) -> u16[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps: the value returned is the mathematical result modulo 2^b, where b is the bit width of the element type.

+

pub fn wrapping_product(self) -> u16[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps: the value returned is the mathematical result modulo 2^b, where b is the bit width of the element type.

+

impl u16x32[src]

pub fn max_element(self) -> u16[src]

Largest vector element value.

+

pub fn min_element(self) -> u16[src]

Smallest vector element value.

+

impl u16x32[src]

pub fn and(self) -> u16[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u16[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u16[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

+

impl u16x32[src]

pub fn from_slice_aligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u16x32[src]

pub fn write_to_slice_aligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u16x32[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl u16x32[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+
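A sketch of the per-lane bit counts, assuming packed_simd:

use packed_simd::u16x32;

fn main() {
    let v = u16x32::splat(0b110);
    // Each count is computed per lane and returned as a vector.
    assert_eq!(v.count_ones(), u16x32::splat(2));
    assert_eq!(v.trailing_zeros(), u16x32::splat(1));
    assert_eq!(v.leading_zeros(), u16x32::splat(13));
}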

impl u16x32[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl u16x32[src]

pub fn eq(self, other: Self) -> m16x32[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x32[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x32[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x32[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x32[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x32[src]

Lane-wise greater-than-or-equals comparison.

+

impl u16x32[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u16x32>[src]

Returns a wrapper that implements PartialOrd.

+

impl u16x32[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u16x32>[src]

Returns a wrapper that implements Ord.

+

impl u16x32[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<u16> for u16x32[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[u16; 32]>> for u16x32[src]

impl AddAssign<u16> for u16x32[src]

impl Binary for u16x32[src]

impl BitAnd<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<u16> for u16x32[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[u16; 32]>> for u16x32[src]

impl BitAndAssign<u16> for u16x32[src]

impl BitOr<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<u16> for u16x32[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[u16; 32]>> for u16x32[src]

impl BitOrAssign<u16> for u16x32[src]

impl BitXor<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<u16> for u16x32[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[u16; 32]>> for u16x32[src]

impl BitXorAssign<u16> for u16x32[src]

impl Debug for u16x32[src]

impl Default for u16x32[src]

impl Div<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<u16> for u16x32[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[u16; 32]>> for u16x32[src]

impl DivAssign<u16> for u16x32[src]

impl Eq for u16x32[src]

impl From<[u16; 32]> for u16x32[src]

impl From<Simd<[u8; 32]>> for u16x32[src]

impl FromBits<Simd<[f32; 16]>> for u16x32[src]

impl FromBits<Simd<[f64; 8]>> for u16x32[src]

impl FromBits<Simd<[i128; 4]>> for u16x32[src]

impl FromBits<Simd<[i16; 32]>> for u16x32[src]

impl FromBits<Simd<[i32; 16]>> for u16x32[src]

impl FromBits<Simd<[i64; 8]>> for u16x32[src]

impl FromBits<Simd<[i8; 64]>> for u16x32[src]

impl FromBits<Simd<[m128; 4]>> for u16x32[src]

impl FromBits<Simd<[m16; 32]>> for u16x32[src]

impl FromBits<Simd<[m32; 16]>> for u16x32[src]

impl FromBits<Simd<[m64; 8]>> for u16x32[src]

impl FromBits<Simd<[m8; 64]>> for u16x32[src]

impl FromBits<Simd<[u128; 4]>> for u16x32[src]

impl FromBits<Simd<[u32; 16]>> for u16x32[src]

impl FromBits<Simd<[u64; 8]>> for u16x32[src]

impl FromBits<Simd<[u8; 64]>> for u16x32[src]

impl FromCast<Simd<[i16; 32]>> for u16x32[src]

impl FromCast<Simd<[i8; 32]>> for u16x32[src]

impl FromCast<Simd<[m16; 32]>> for u16x32[src]

impl FromCast<Simd<[m8; 32]>> for u16x32[src]

impl FromCast<Simd<[u8; 32]>> for u16x32[src]

impl Hash for u16x32[src]

impl LowerHex for u16x32[src]

impl Mul<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<u16> for u16x32[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[u16; 32]>> for u16x32[src]

impl MulAssign<u16> for u16x32[src]

impl Not for u16x32[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for u16x32[src]

impl PartialEq<Simd<[u16; 32]>> for u16x32[src]

impl<'a> Product<&'a Simd<[u16; 32]>> for u16x32[src]

impl Product<Simd<[u16; 32]>> for u16x32[src]

impl Rem<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<u16> for u16x32[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[u16; 32]>> for u16x32[src]

impl RemAssign<u16> for u16x32[src]

impl Shl<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for u16x32[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[u16; 32]>> for u16x32[src]

impl ShlAssign<u32> for u16x32[src]

impl Shr<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for u16x32[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[u16; 32]>> for u16x32[src]

impl ShrAssign<u32> for u16x32[src]

impl Simd for u16x32[src]

type Element = u16

Element type of the SIMD vector

+

type LanesType = [u32; 32]

The type: [u32; Self::N].

+

impl Sub<Simd<[u16; 32]>> for u16x32[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<u16> for u16x32[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[u16; 32]>> for u16x32[src]

impl SubAssign<u16> for u16x32[src]

impl<'a> Sum<&'a Simd<[u16; 32]>> for u16x32[src]

impl Sum<Simd<[u16; 32]>> for u16x32[src]

impl UpperHex for u16x32[src]

\ No newline at end of file diff --git a/packed_simd/type.u16x4.html b/packed_simd/type.u16x4.html new file mode 100644 index 000000000..e13d9704f --- /dev/null +++ b/packed_simd/type.u16x4.html @@ -0,0 +1,240 @@ +packed_simd::u16x4 - Rust

Type Definition packed_simd::u16x4

type u16x4 = Simd<[u16; 4]>;

A 64-bit vector with 4 u16 lanes.

+

Implementations

impl u16x4[src]

pub const fn new(x0: u16, x1: u16, x2: u16, x3: u16) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u16) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> u16[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u16[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl u16x4[src]

pub fn rotate_left(self, n: u16x4) -> u16x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: u16x4) -> u16x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl u16x4[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl u16x4[src]

pub fn wrapping_sum(self) -> u16[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps: the value returned is the mathematical result modulo 2^b, where b is the bit width of the element type.

+

pub fn wrapping_product(self) -> u16[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps: the value returned is the mathematical result modulo 2^b, where b is the bit width of the element type.

+

impl u16x4[src]

pub fn max_element(self) -> u16[src]

Largest vector element value.

+

pub fn min_element(self) -> u16[src]

Smallest vector element value.

+
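A sketch of the horizontal extrema, assuming packed_simd:

use packed_simd::u16x4;

fn main() {
    let v = u16x4::new(4, 1, 7, 2);
    assert_eq!(v.max_element(), 7);
    assert_eq!(v.min_element(), 1);
}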

impl u16x4[src]

pub fn and(self) -> u16[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u16[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u16[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

+

impl u16x4[src]

pub fn from_slice_aligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u16x4[src]

pub fn write_to_slice_aligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u16x4[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl u16x4[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl u16x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl u16x4[src]

pub fn eq(self, other: Self) -> m16x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x4[src]

Lane-wise greater-than-or-equals comparison.

+

impl u16x4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u16x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl u16x4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u16x4>[src]

Returns a wrapper that implements Ord.

+

impl u16x4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<u16> for u16x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[u16; 4]>> for u16x4[src]

impl AddAssign<u16> for u16x4[src]

impl Binary for u16x4[src]

impl BitAnd<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<u16> for u16x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[u16; 4]>> for u16x4[src]

impl BitAndAssign<u16> for u16x4[src]

impl BitOr<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<u16> for u16x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[u16; 4]>> for u16x4[src]

impl BitOrAssign<u16> for u16x4[src]

impl BitXor<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<u16> for u16x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[u16; 4]>> for u16x4[src]

impl BitXorAssign<u16> for u16x4[src]

impl Debug for u16x4[src]

impl Default for u16x4[src]

impl Div<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<u16> for u16x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[u16; 4]>> for u16x4[src]

impl DivAssign<u16> for u16x4[src]

impl Eq for u16x4[src]

impl From<[u16; 4]> for u16x4[src]

impl From<Simd<[u8; 4]>> for u16x4[src]

impl FromBits<Simd<[f32; 2]>> for u16x4[src]

impl FromBits<Simd<[i16; 4]>> for u16x4[src]

impl FromBits<Simd<[i32; 2]>> for u16x4[src]

impl FromBits<Simd<[i8; 8]>> for u16x4[src]

impl FromBits<Simd<[m16; 4]>> for u16x4[src]

impl FromBits<Simd<[m32; 2]>> for u16x4[src]

impl FromBits<Simd<[m8; 8]>> for u16x4[src]

impl FromBits<Simd<[u32; 2]>> for u16x4[src]

impl FromBits<Simd<[u8; 8]>> for u16x4[src]

impl FromBits<__m64> for u16x4[src]

impl FromCast<Simd<[f32; 4]>> for u16x4[src]

impl FromCast<Simd<[f64; 4]>> for u16x4[src]

impl FromCast<Simd<[i128; 4]>> for u16x4[src]

impl FromCast<Simd<[i16; 4]>> for u16x4[src]

impl FromCast<Simd<[i32; 4]>> for u16x4[src]

impl FromCast<Simd<[i64; 4]>> for u16x4[src]

impl FromCast<Simd<[i8; 4]>> for u16x4[src]

impl FromCast<Simd<[isize; 4]>> for u16x4[src]

impl FromCast<Simd<[m128; 4]>> for u16x4[src]

impl FromCast<Simd<[m16; 4]>> for u16x4[src]

impl FromCast<Simd<[m32; 4]>> for u16x4[src]

impl FromCast<Simd<[m64; 4]>> for u16x4[src]

impl FromCast<Simd<[m8; 4]>> for u16x4[src]

impl FromCast<Simd<[msize; 4]>> for u16x4[src]

impl FromCast<Simd<[u128; 4]>> for u16x4[src]

impl FromCast<Simd<[u32; 4]>> for u16x4[src]

impl FromCast<Simd<[u64; 4]>> for u16x4[src]

impl FromCast<Simd<[u8; 4]>> for u16x4[src]

impl FromCast<Simd<[usize; 4]>> for u16x4[src]

impl Hash for u16x4[src]

impl LowerHex for u16x4[src]

impl Mul<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<u16> for u16x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[u16; 4]>> for u16x4[src]

impl MulAssign<u16> for u16x4[src]

impl Not for u16x4[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for u16x4[src]

impl PartialEq<Simd<[u16; 4]>> for u16x4[src]

impl<'a> Product<&'a Simd<[u16; 4]>> for u16x4[src]

impl Product<Simd<[u16; 4]>> for u16x4[src]

impl Rem<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<u16> for u16x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[u16; 4]>> for u16x4[src]

impl RemAssign<u16> for u16x4[src]

impl Shl<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for u16x4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[u16; 4]>> for u16x4[src]

impl ShlAssign<u32> for u16x4[src]

impl Shr<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for u16x4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[u16; 4]>> for u16x4[src]

impl ShrAssign<u32> for u16x4[src]

impl Simd for u16x4[src]

type Element = u16

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+

impl Sub<Simd<[u16; 4]>> for u16x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<u16> for u16x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[u16; 4]>> for u16x4[src]

impl SubAssign<u16> for u16x4[src]

impl<'a> Sum<&'a Simd<[u16; 4]>> for u16x4[src]

impl Sum<Simd<[u16; 4]>> for u16x4[src]

impl UpperHex for u16x4[src]

\ No newline at end of file diff --git a/packed_simd/type.u16x8.html b/packed_simd/type.u16x8.html new file mode 100644 index 000000000..53ad0e3c3 --- /dev/null +++ b/packed_simd/type.u16x8.html @@ -0,0 +1,246 @@ +packed_simd::u16x8 - Rust

Type Definition packed_simd::u16x8

type u16x8 = Simd<[u16; 8]>;

A 128-bit vector with 8 u16 lanes.

+

Implementations

impl u16x8[src]

pub const fn new(
    x0: u16,
    x1: u16,
    x2: u16,
    x3: u16,
    x4: u16,
    x5: u16,
    x6: u16,
    x7: u16
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u16) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> u16[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u16[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u16) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl u16x8[src]

pub fn rotate_left(self, n: u16x8) -> u16x8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: u16x8) -> u16x8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl u16x8[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl u16x8[src]

pub fn wrapping_sum(self) -> u16[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps: the value returned is the mathematical result modulo 2^b, where b is the bit width of the element type.

+

pub fn wrapping_product(self) -> u16[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps: the value returned is the mathematical result modulo 2^b, where b is the bit width of the element type.

+

impl u16x8[src]

pub fn max_element(self) -> u16[src]

Largest vector element value.

+

pub fn min_element(self) -> u16[src]

Smallest vector element value.

+

impl u16x8[src]

pub fn and(self) -> u16[src]

Horizontal bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u16[src]

Horizontal bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u16[src]

Horizontal bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

+

impl u16x8[src]

pub fn from_slice_aligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u16]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u16x8[src]

pub fn write_to_slice_aligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u16])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u16x8[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl u16x8[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl u16x8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl u16x8[src]

pub fn eq(self, other: Self) -> m16x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m16x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m16x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m16x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m16x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m16x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl u16x8[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u16x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl u16x8[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u16x8>[src]

Returns a wrapper that implements Ord.

+

impl u16x8[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<u16> for u16x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[u16; 8]>> for u16x8[src]

impl AddAssign<u16> for u16x8[src]

impl Binary for u16x8[src]

impl BitAnd<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<u16> for u16x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[u16; 8]>> for u16x8[src]

impl BitAndAssign<u16> for u16x8[src]

impl BitOr<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<u16> for u16x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[u16; 8]>> for u16x8[src]

impl BitOrAssign<u16> for u16x8[src]

impl BitXor<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<u16> for u16x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[u16; 8]>> for u16x8[src]

impl BitXorAssign<u16> for u16x8[src]

impl Debug for u16x8[src]

impl Default for u16x8[src]

impl Div<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<u16> for u16x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[u16; 8]>> for u16x8[src]

impl DivAssign<u16> for u16x8[src]

impl Eq for u16x8[src]

impl From<[u16; 8]> for u16x8[src]

impl From<Simd<[u8; 8]>> for u16x8[src]

impl FromBits<Simd<[f32; 4]>> for u16x8[src]

impl FromBits<Simd<[f64; 2]>> for u16x8[src]

impl FromBits<Simd<[i128; 1]>> for u16x8[src]

impl FromBits<Simd<[i16; 8]>> for u16x8[src]

impl FromBits<Simd<[i32; 4]>> for u16x8[src]

impl FromBits<Simd<[i64; 2]>> for u16x8[src]

impl FromBits<Simd<[i8; 16]>> for u16x8[src]

impl FromBits<Simd<[m128; 1]>> for u16x8[src]

impl FromBits<Simd<[m16; 8]>> for u16x8[src]

impl FromBits<Simd<[m32; 4]>> for u16x8[src]

impl FromBits<Simd<[m64; 2]>> for u16x8[src]

impl FromBits<Simd<[m8; 16]>> for u16x8[src]

impl FromBits<Simd<[u128; 1]>> for u16x8[src]

impl FromBits<Simd<[u32; 4]>> for u16x8[src]

impl FromBits<Simd<[u64; 2]>> for u16x8[src]

impl FromBits<Simd<[u8; 16]>> for u16x8[src]

impl FromBits<__m128> for u16x8[src]

impl FromBits<__m128d> for u16x8[src]

impl FromBits<__m128i> for u16x8[src]

impl FromCast<Simd<[f32; 8]>> for u16x8[src]

impl FromCast<Simd<[f64; 8]>> for u16x8[src]

impl FromCast<Simd<[i16; 8]>> for u16x8[src]

impl FromCast<Simd<[i32; 8]>> for u16x8[src]

impl FromCast<Simd<[i64; 8]>> for u16x8[src]

impl FromCast<Simd<[i8; 8]>> for u16x8[src]

impl FromCast<Simd<[isize; 8]>> for u16x8[src]

impl FromCast<Simd<[m16; 8]>> for u16x8[src]

impl FromCast<Simd<[m32; 8]>> for u16x8[src]

impl FromCast<Simd<[m64; 8]>> for u16x8[src]

impl FromCast<Simd<[m8; 8]>> for u16x8[src]

impl FromCast<Simd<[msize; 8]>> for u16x8[src]

impl FromCast<Simd<[u32; 8]>> for u16x8[src]

impl FromCast<Simd<[u64; 8]>> for u16x8[src]

impl FromCast<Simd<[u8; 8]>> for u16x8[src]

impl FromCast<Simd<[usize; 8]>> for u16x8[src]
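To contrast the two conversion families listed above: FromCast converts lane-wise and keeps the lane count, while FromBits reinterprets the raw bits and keeps the total width. A hedged sketch, assuming packed_simd exposes from_cast and from_bits:

use packed_simd::{i16x8, u16x8, u32x4, FromBits, FromCast};

fn main() {
    // Cast: -1i16 becomes 65535u16 in every lane (numeric, lane-wise).
    let a = i16x8::splat(-1);
    assert_eq!(u16x8::from_cast(a), u16x8::splat(u16::MAX));

    // Bits: the same 128 bits, viewed as eight u16 lanes instead of four u32.
    let b = u32x4::splat(0x0001_0001);
    assert_eq!(u16x8::from_bits(b), u16x8::splat(1));
}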

impl Hash for u16x8[src]

impl LowerHex for u16x8[src]

impl Mul<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<u16> for u16x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[u16; 8]>> for u16x8[src]

impl MulAssign<u16> for u16x8[src]

impl Not for u16x8[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for u16x8[src]

impl PartialEq<Simd<[u16; 8]>> for u16x8[src]

impl<'a> Product<&'a Simd<[u16; 8]>> for u16x8[src]

impl Product<Simd<[u16; 8]>> for u16x8[src]

impl Rem<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<u16> for u16x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[u16; 8]>> for u16x8[src]

impl RemAssign<u16> for u16x8[src]

impl Shl<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for u16x8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[u16; 8]>> for u16x8[src]

impl ShlAssign<u32> for u16x8[src]

impl Shr<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for u16x8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[u16; 8]>> for u16x8[src]

impl ShrAssign<u32> for u16x8[src]

impl Simd for u16x8[src]

type Element = u16

Element type of the SIMD vector

+

type LanesType = [u32; 8]

The type: [u32; Self::N].

+

impl Sub<Simd<[u16; 8]>> for u16x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<u16> for u16x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[u16; 8]>> for u16x8[src]

impl SubAssign<u16> for u16x8[src]

impl<'a> Sum<&'a Simd<[u16; 8]>> for u16x8[src]

impl Sum<Simd<[u16; 8]>> for u16x8[src]

impl UpperHex for u16x8[src]

\ No newline at end of file diff --git a/packed_simd/type.u32x16.html b/packed_simd/type.u32x16.html new file mode 100644 index 000000000..3fff4c769 --- /dev/null +++ b/packed_simd/type.u32x16.html @@ -0,0 +1,237 @@ +packed_simd::u32x16 - Rust

Type Definition packed_simd::u32x16

type u32x16 = Simd<[u32; 16]>;

A 512-bit vector with 16 u32 lanes.

+

Implementations

impl u32x16[src]

pub const fn new(
    x0: u32,
    x1: u32,
    x2: u32,
    x3: u32,
    x4: u32,
    x5: u32,
    x6: u32,
    x7: u32,
    x8: u32,
    x9: u32,
    x10: u32,
    x11: u32,
    x12: u32,
    x13: u32,
    x14: u32,
    x15: u32
) -> Self
[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u32) -> Self[src]

Constructs a new instance with each element initialized to value.

+
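
For illustration, a minimal sketch of how these constructors combine with the operator impls listed under Trait Implementations further down (assuming the packed_simd crate is in scope):

use packed_simd::u32x16;

let ones = u32x16::splat(1);          // all 16 lanes hold 1
let twos = ones + u32x16::splat(1);   // lane-wise addition via the Add impl
assert_eq!(u32x16::lanes(), 16);
assert_eq!(twos, u32x16::splat(2));   // PartialEq compares all lanes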

pub fn extract(self, index: usize) -> u32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+
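
A short sketch of the extract/replace API; note that replace returns a new vector rather than mutating in place, which is why the must_use attributes above exist:

use packed_simd::u32x16;

let v = u32x16::splat(0);
let w = v.replace(3, 42);        // new vector; v itself is unchanged
assert_eq!(w.extract(3), 42);
assert_eq!(v.extract(3), 0);
// v.extract(16) would panic; v.extract_unchecked(16) would be undefined behavior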

impl u32x16[src]

pub fn rotate_left(self, n: u32x16) -> u32x16[src]

Shifts the bits of each lane to the left by the amount specified in the corresponding lane of n, wrapping the truncated bits around to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: u32x16) -> u32x16[src]

Shifts the bits of each lane to the right by the amount specified in the corresponding lane of n, wrapping the truncated bits around to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+
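
A sketch of the per-lane rotation, showing how the truncated most-significant bit wraps around instead of being discarded as << would do:

use packed_simd::u32x16;

let x = u32x16::splat(0x8000_0001);
let r = x.rotate_left(u32x16::splat(1));      // each lane rotates by the matching lane of n
assert_eq!(r.extract(0), 0x0000_0003);        // the MSB wrapped around to bit 0
assert_eq!((x << 1u32).extract(0), 0x0000_0002); // << simply drops the MSB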

impl u32x16[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+
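
A sketch of the lane-wise min/max, not to be confused with the horizontal min_element/max_element reductions documented further down:

use packed_simd::u32x16;

let a = u32x16::splat(1).replace(0, 9);
let b = u32x16::splat(5);
assert_eq!(a.min(b).extract(0), 5); // min(9, 5)
assert_eq!(a.min(b).extract(1), 1); // min(1, 5)
assert_eq!(a.max(b).extract(1), 5); // max(1, 5)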

impl u32x16[src]

pub fn wrapping_sum(self) -> u32[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

pub fn wrapping_product(self) -> u32[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+
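
A sketch of the wrapping behavior of the horizontal reductions; with every lane at u32::max_value(), the sum 16 * (2^32 - 1) is reduced modulo 2^32:

use packed_simd::u32x16;

assert_eq!(u32x16::splat(1).wrapping_sum(), 16);
let v = u32x16::splat(u32::max_value());
assert_eq!(v.wrapping_sum(), 0xFFFF_FFF0); // 16 * (2^32 - 1) mod 2^32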

impl u32x16[src]

pub fn max_element(self) -> u32[src]

Largest vector element value.

+

pub fn min_element(self) -> u32[src]

Smallest vector element value.

+

impl u32x16[src]

pub fn and(self) -> u32[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> u32[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> u32[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+
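
A sketch of the bitwise reductions; since all 16 lanes are identical here, the and/or reductions return the lane value, and the xor reduction of an even number of identical lanes is zero:

use packed_simd::u32x16;

let v = u32x16::splat(0b1010);
assert_eq!(v.and(), 0b1010);
assert_eq!(v.or(), 0b1010);
assert_eq!(v.xor(), 0); // 16 identical lanes cancel pairwise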

impl u32x16[src]

pub fn from_slice_aligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
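
A sketch of loading a vector from a slice; from_slice_unaligned has no alignment requirement, while from_slice_aligned requires an align_of::<Self>() boundary:

use packed_simd::u32x16;

let data: Vec<u32> = (0..16).collect();
let v = u32x16::from_slice_unaligned(&data); // panics if data.len() < 16
assert_eq!(v.extract(5), 5);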

impl u32x16[src]

pub fn write_to_slice_aligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
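
A sketch of the store side, mirroring the slice loads above:

use packed_simd::u32x16;

let mut out = [0u32; 16];
u32x16::splat(7).write_to_slice_unaligned(&mut out); // panics if out.len() < 16
assert_eq!(out, [7u32; 16]);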

impl u32x16[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+
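
A sketch of the byte-order conversions; the to_le/from_le and to_be/from_be pairs round-trip on any target, so the assertions below are portable:

use packed_simd::u32x16;

let v = u32x16::splat(0x1122_3344);
assert_eq!(v.swap_bytes().extract(0), 0x4433_2211);
assert_eq!(u32x16::from_le(v.to_le()), v); // no-op on little-endian targets
assert_eq!(u32x16::from_be(v.to_be()), v); // no-op on big-endian targets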

impl u32x16[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+
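
A sketch of the per-lane bit counts; each result is itself a vector, one count per lane:

use packed_simd::u32x16;

let v = u32x16::splat(0b1011);
assert_eq!(v.count_ones().extract(0), 3);
assert_eq!(v.trailing_zeros().extract(0), 0);
assert_eq!(v.leading_zeros().extract(0), 28); // 32-bit lanes, 4 significant bits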

impl u32x16[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+
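
A sketch of a dynamic shuffle, assuming the index vector for u32x16 is itself a u32x16 (the Indices type of the Shuffle1Dyn bound):

use packed_simd::u32x16;

let v = u32x16::splat(0).replace(0, 7);
let idx = u32x16::splat(0); // every output lane reads input lane 0
assert_eq!(v.shuffle1_dyn(idx), u32x16::splat(7));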

impl u32x16[src]

pub fn eq(self, other: Self) -> m32x16[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x16[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x16[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x16[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x16[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x16[src]

Lane-wise greater-than-or-equals comparison.

+
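
A sketch of the lane-wise comparisons; each returns an m32x16 mask, and mask methods such as any, all, and select (assumed here to be part of this crate's mask API) turn the per-lane results into decisions:

use packed_simd::u32x16;

let a = u32x16::splat(1).replace(0, 9);
let b = u32x16::splat(5);
let m = a.gt(b);              // true only in lane 0
assert!(m.any() && !m.all());
let clamped = m.select(b, a); // where a > b, take b; elsewhere keep a
assert_eq!(clamped.extract(0), 5);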

impl u32x16[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u32x16>[src]

Returns a wrapper that implements PartialOrd.

+

impl u32x16[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u32x16>[src]

Returns a wrapper that implements Ord.

+
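
A sketch of the ordering wrappers; the wrapper compares vectors lexicographically, lane 0 first, which the vector types themselves deliberately do not do:

use packed_simd::u32x16;

let a = u32x16::splat(1);
let b = u32x16::splat(1).replace(15, 2);
assert!(a.partial_lex_ord() < b.partial_lex_ord());
assert_eq!(a.lex_ord().cmp(&b.lex_ord()), core::cmp::Ordering::Less);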

impl u32x16[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the result type has bits, the bits that do not correspond to any vector lane are cleared.

+
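
A sketch of bitmask, assuming the usual convention that bit i of the result mirrors the MSB of lane i:

use packed_simd::u32x16;

let v = u32x16::splat(0)
    .replace(0, 0x8000_0000)       // MSB set in lane 0
    .replace(2, u32::max_value()); // MSB set in lane 2
assert_eq!(v.bitmask(), 0b101);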

Trait Implementations

impl Add<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<u32> for u32x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[u32; 16]>> for u32x16[src]

impl AddAssign<u32> for u32x16[src]

impl Binary for u32x16[src]

impl BitAnd<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<u32> for u32x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[u32; 16]>> for u32x16[src]

impl BitAndAssign<u32> for u32x16[src]

impl BitOr<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<u32> for u32x16[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[u32; 16]>> for u32x16[src]

impl BitOrAssign<u32> for u32x16[src]

impl BitXor<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<u32> for u32x16[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[u32; 16]>> for u32x16[src]

impl BitXorAssign<u32> for u32x16[src]

impl Debug for u32x16[src]

impl Default for u32x16[src]

impl Div<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<u32> for u32x16[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[u32; 16]>> for u32x16[src]

impl DivAssign<u32> for u32x16[src]

impl Eq for u32x16[src]

impl From<[u32; 16]> for u32x16[src]

impl From<Simd<[u16; 16]>> for u32x16[src]

impl From<Simd<[u8; 16]>> for u32x16[src]

impl FromBits<Simd<[f32; 16]>> for u32x16[src]

impl FromBits<Simd<[f64; 8]>> for u32x16[src]

impl FromBits<Simd<[i128; 4]>> for u32x16[src]

impl FromBits<Simd<[i16; 32]>> for u32x16[src]

impl FromBits<Simd<[i32; 16]>> for u32x16[src]

impl FromBits<Simd<[i64; 8]>> for u32x16[src]

impl FromBits<Simd<[i8; 64]>> for u32x16[src]

impl FromBits<Simd<[m128; 4]>> for u32x16[src]

impl FromBits<Simd<[m16; 32]>> for u32x16[src]

impl FromBits<Simd<[m32; 16]>> for u32x16[src]

impl FromBits<Simd<[m64; 8]>> for u32x16[src]

impl FromBits<Simd<[m8; 64]>> for u32x16[src]

impl FromBits<Simd<[u128; 4]>> for u32x16[src]

impl FromBits<Simd<[u16; 32]>> for u32x16[src]

impl FromBits<Simd<[u64; 8]>> for u32x16[src]

impl FromBits<Simd<[u8; 64]>> for u32x16[src]

impl FromCast<Simd<[f32; 16]>> for u32x16[src]

impl FromCast<Simd<[i16; 16]>> for u32x16[src]

impl FromCast<Simd<[i32; 16]>> for u32x16[src]

impl FromCast<Simd<[i8; 16]>> for u32x16[src]

impl FromCast<Simd<[m16; 16]>> for u32x16[src]

impl FromCast<Simd<[m32; 16]>> for u32x16[src]

impl FromCast<Simd<[m8; 16]>> for u32x16[src]

impl FromCast<Simd<[u16; 16]>> for u32x16[src]

impl FromCast<Simd<[u8; 16]>> for u32x16[src]

impl Hash for u32x16[src]

impl LowerHex for u32x16[src]

impl Mul<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<u32> for u32x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[u32; 16]>> for u32x16[src]

impl MulAssign<u32> for u32x16[src]

impl Not for u32x16[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for u32x16[src]

impl PartialEq<Simd<[u32; 16]>> for u32x16[src]

impl<'a> Product<&'a Simd<[u32; 16]>> for u32x16[src]

impl Product<Simd<[u32; 16]>> for u32x16[src]

impl Rem<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<u32> for u32x16[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[u32; 16]>> for u32x16[src]

impl RemAssign<u32> for u32x16[src]

impl Shl<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for u32x16[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[u32; 16]>> for u32x16[src]

impl ShlAssign<u32> for u32x16[src]

impl Shr<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for u32x16[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[u32; 16]>> for u32x16[src]

impl ShrAssign<u32> for u32x16[src]

impl Simd for u32x16[src]

type Element = u32

Element type of the SIMD vector

+

type LanesType = [u32; 16]

The type: [u32; Self::N].

+

impl Sub<Simd<[u32; 16]>> for u32x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<u32> for u32x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[u32; 16]>> for u32x16[src]

impl SubAssign<u32> for u32x16[src]

impl<'a> Sum<&'a Simd<[u32; 16]>> for u32x16[src]

impl Sum<Simd<[u32; 16]>> for u32x16[src]

impl UpperHex for u32x16[src]

\ No newline at end of file diff --git a/packed_simd/type.u32x2.html b/packed_simd/type.u32x2.html new file mode 100644 index 000000000..a9488319a --- /dev/null +++ b/packed_simd/type.u32x2.html @@ -0,0 +1,241 @@ +packed_simd::u32x2 - Rust

Type Definition packed_simd::u32x2

type u32x2 = Simd<[u32; 2]>;

A 64-bit vector with 2 u32 lanes.

+

Implementations

impl u32x2[src]

pub const fn new(x0: u32, x1: u32) -> Self[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u32) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> u32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl u32x2[src]

pub fn rotate_left(self, n: u32x2) -> u32x2[src]

Shifts the bits of each lane to the left by the amount specified in the corresponding lane of n, wrapping the truncated bits around to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: u32x2) -> u32x2[src]

Shifts the bits of each lane to the right by the amount specified in the corresponding lane of n, wrapping the truncated bits around to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl u32x2[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl u32x2[src]

pub fn wrapping_sum(self) -> u32[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

pub fn wrapping_product(self) -> u32[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

impl u32x2[src]

pub fn max_element(self) -> u32[src]

Largest vector element value.

+

pub fn min_element(self) -> u32[src]

Smallest vector element value.

+

impl u32x2[src]

pub fn and(self) -> u32[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> u32[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> u32[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl u32x2[src]

pub fn from_slice_aligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u32x2[src]

pub fn write_to_slice_aligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u32x2[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl u32x2[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl u32x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl u32x2[src]

pub fn eq(self, other: Self) -> m32x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x2[src]

Lane-wise greater-than-or-equals comparison.

+

impl u32x2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u32x2>[src]

Returns a wrapper that implements PartialOrd.

+

impl u32x2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u32x2>[src]

Returns a wrapper that implements Ord.

+

impl u32x2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the result type has bits, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<u32> for u32x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[u32; 2]>> for u32x2[src]

impl AddAssign<u32> for u32x2[src]

impl Binary for u32x2[src]

impl BitAnd<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<u32> for u32x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[u32; 2]>> for u32x2[src]

impl BitAndAssign<u32> for u32x2[src]

impl BitOr<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<u32> for u32x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[u32; 2]>> for u32x2[src]

impl BitOrAssign<u32> for u32x2[src]

impl BitXor<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<u32> for u32x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[u32; 2]>> for u32x2[src]

impl BitXorAssign<u32> for u32x2[src]

impl Debug for u32x2[src]

impl Default for u32x2[src]

impl Div<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<u32> for u32x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[u32; 2]>> for u32x2[src]

impl DivAssign<u32> for u32x2[src]

impl Eq for u32x2[src]

impl From<[u32; 2]> for u32x2[src]

impl From<Simd<[u16; 2]>> for u32x2[src]

impl From<Simd<[u8; 2]>> for u32x2[src]

impl FromBits<Simd<[f32; 2]>> for u32x2[src]

impl FromBits<Simd<[i16; 4]>> for u32x2[src]

impl FromBits<Simd<[i32; 2]>> for u32x2[src]

impl FromBits<Simd<[i8; 8]>> for u32x2[src]

impl FromBits<Simd<[m16; 4]>> for u32x2[src]

impl FromBits<Simd<[m32; 2]>> for u32x2[src]

impl FromBits<Simd<[m8; 8]>> for u32x2[src]

impl FromBits<Simd<[u16; 4]>> for u32x2[src]

impl FromBits<Simd<[u8; 8]>> for u32x2[src]

impl FromBits<__m64> for u32x2[src]

impl FromCast<Simd<[f32; 2]>> for u32x2[src]

impl FromCast<Simd<[f64; 2]>> for u32x2[src]

impl FromCast<Simd<[i128; 2]>> for u32x2[src]

impl FromCast<Simd<[i16; 2]>> for u32x2[src]

impl FromCast<Simd<[i32; 2]>> for u32x2[src]

impl FromCast<Simd<[i64; 2]>> for u32x2[src]

impl FromCast<Simd<[i8; 2]>> for u32x2[src]

impl FromCast<Simd<[isize; 2]>> for u32x2[src]

impl FromCast<Simd<[m128; 2]>> for u32x2[src]

impl FromCast<Simd<[m16; 2]>> for u32x2[src]

impl FromCast<Simd<[m32; 2]>> for u32x2[src]

impl FromCast<Simd<[m64; 2]>> for u32x2[src]

impl FromCast<Simd<[m8; 2]>> for u32x2[src]

impl FromCast<Simd<[msize; 2]>> for u32x2[src]

impl FromCast<Simd<[u128; 2]>> for u32x2[src]

impl FromCast<Simd<[u16; 2]>> for u32x2[src]

impl FromCast<Simd<[u64; 2]>> for u32x2[src]

impl FromCast<Simd<[u8; 2]>> for u32x2[src]

impl FromCast<Simd<[usize; 2]>> for u32x2[src]

impl Hash for u32x2[src]

impl LowerHex for u32x2[src]

impl Mul<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<u32> for u32x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[u32; 2]>> for u32x2[src]

impl MulAssign<u32> for u32x2[src]

impl Not for u32x2[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for u32x2[src]

impl PartialEq<Simd<[u32; 2]>> for u32x2[src]

impl<'a> Product<&'a Simd<[u32; 2]>> for u32x2[src]

impl Product<Simd<[u32; 2]>> for u32x2[src]

impl Rem<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<u32> for u32x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[u32; 2]>> for u32x2[src]

impl RemAssign<u32> for u32x2[src]

impl Shl<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for u32x2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[u32; 2]>> for u32x2[src]

impl ShlAssign<u32> for u32x2[src]

impl Shr<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for u32x2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[u32; 2]>> for u32x2[src]

impl ShrAssign<u32> for u32x2[src]

impl Simd for u32x2[src]

type Element = u32

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+

impl Sub<Simd<[u32; 2]>> for u32x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<u32> for u32x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[u32; 2]>> for u32x2[src]

impl SubAssign<u32> for u32x2[src]

impl<'a> Sum<&'a Simd<[u32; 2]>> for u32x2[src]

impl Sum<Simd<[u32; 2]>> for u32x2[src]

impl UpperHex for u32x2[src]

\ No newline at end of file diff --git a/packed_simd/type.u32x4.html b/packed_simd/type.u32x4.html new file mode 100644 index 000000000..2aff0b829 --- /dev/null +++ b/packed_simd/type.u32x4.html @@ -0,0 +1,250 @@ +packed_simd::u32x4 - Rust

Type Definition packed_simd::u32x4

type u32x4 = Simd<[u32; 4]>;

A 128-bit vector with 4 u32 lanes.

+

Implementations

impl u32x4[src]

pub const fn new(x0: u32, x1: u32, x2: u32, x3: u32) -> Self[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u32) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> u32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl u32x4[src]

pub fn rotate_left(self, n: u32x4) -> u32x4[src]

Shifts the bits of each lane to the left by the amount specified in the corresponding lane of n, wrapping the truncated bits around to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: u32x4) -> u32x4[src]

Shifts the bits of each lane to the right by the amount specified in the corresponding lane of n, wrapping the truncated bits around to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl u32x4[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl u32x4[src]

pub fn wrapping_sum(self) -> u32[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

pub fn wrapping_product(self) -> u32[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

impl u32x4[src]

pub fn max_element(self) -> u32[src]

Largest vector element value.

+

pub fn min_element(self) -> u32[src]

Smallest vector element value.

+

impl u32x4[src]

pub fn and(self) -> u32[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> u32[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> u32[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl u32x4[src]

pub fn from_slice_aligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u32x4[src]

pub fn write_to_slice_aligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u32x4[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl u32x4[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl u32x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl u32x4[src]

pub fn eq(self, other: Self) -> m32x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x4[src]

Lane-wise greater-than-or-equals comparison.

+

impl u32x4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u32x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl u32x4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u32x4>[src]

Returns a wrapper that implements Ord.

+

impl u32x4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the result type has bits, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<u32> for u32x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[u32; 4]>> for u32x4[src]

impl AddAssign<u32> for u32x4[src]

impl Binary for u32x4[src]

impl BitAnd<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<u32> for u32x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[u32; 4]>> for u32x4[src]

impl BitAndAssign<u32> for u32x4[src]

impl BitOr<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<u32> for u32x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[u32; 4]>> for u32x4[src]

impl BitOrAssign<u32> for u32x4[src]

impl BitXor<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<u32> for u32x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[u32; 4]>> for u32x4[src]

impl BitXorAssign<u32> for u32x4[src]

impl Debug for u32x4[src]

impl Default for u32x4[src]

impl Div<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<u32> for u32x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[u32; 4]>> for u32x4[src]

impl DivAssign<u32> for u32x4[src]

impl Eq for u32x4[src]

impl From<[u32; 4]> for u32x4[src]

impl From<Simd<[u16; 4]>> for u32x4[src]

impl From<Simd<[u8; 4]>> for u32x4[src]

impl FromBits<Simd<[f32; 4]>> for u32x4[src]

impl FromBits<Simd<[f64; 2]>> for u32x4[src]

impl FromBits<Simd<[i128; 1]>> for u32x4[src]

impl FromBits<Simd<[i16; 8]>> for u32x4[src]

impl FromBits<Simd<[i32; 4]>> for u32x4[src]

impl FromBits<Simd<[i64; 2]>> for u32x4[src]

impl FromBits<Simd<[i8; 16]>> for u32x4[src]

impl FromBits<Simd<[m128; 1]>> for u32x4[src]

impl FromBits<Simd<[m16; 8]>> for u32x4[src]

impl FromBits<Simd<[m32; 4]>> for u32x4[src]

impl FromBits<Simd<[m64; 2]>> for u32x4[src]

impl FromBits<Simd<[m8; 16]>> for u32x4[src]

impl FromBits<Simd<[u128; 1]>> for u32x4[src]

impl FromBits<Simd<[u16; 8]>> for u32x4[src]

impl FromBits<Simd<[u64; 2]>> for u32x4[src]

impl FromBits<Simd<[u8; 16]>> for u32x4[src]

impl FromBits<__m128> for u32x4[src]

impl FromBits<__m128d> for u32x4[src]

impl FromBits<__m128i> for u32x4[src]

impl FromCast<Simd<[f32; 4]>> for u32x4[src]

impl FromCast<Simd<[f64; 4]>> for u32x4[src]

impl FromCast<Simd<[i128; 4]>> for u32x4[src]

impl FromCast<Simd<[i16; 4]>> for u32x4[src]

impl FromCast<Simd<[i32; 4]>> for u32x4[src]

impl FromCast<Simd<[i64; 4]>> for u32x4[src]

impl FromCast<Simd<[i8; 4]>> for u32x4[src]

impl FromCast<Simd<[isize; 4]>> for u32x4[src]

impl FromCast<Simd<[m128; 4]>> for u32x4[src]

impl FromCast<Simd<[m16; 4]>> for u32x4[src]

impl FromCast<Simd<[m32; 4]>> for u32x4[src]

impl FromCast<Simd<[m64; 4]>> for u32x4[src]

impl FromCast<Simd<[m8; 4]>> for u32x4[src]

impl FromCast<Simd<[msize; 4]>> for u32x4[src]

impl FromCast<Simd<[u128; 4]>> for u32x4[src]

impl FromCast<Simd<[u16; 4]>> for u32x4[src]

impl FromCast<Simd<[u64; 4]>> for u32x4[src]

impl FromCast<Simd<[u8; 4]>> for u32x4[src]

impl FromCast<Simd<[usize; 4]>> for u32x4[src]

impl Hash for u32x4[src]

impl LowerHex for u32x4[src]

impl Mul<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<u32> for u32x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[u32; 4]>> for u32x4[src]

impl MulAssign<u32> for u32x4[src]

impl Not for u32x4[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for u32x4[src]

impl PartialEq<Simd<[u32; 4]>> for u32x4[src]

impl<'a> Product<&'a Simd<[u32; 4]>> for u32x4[src]

impl Product<Simd<[u32; 4]>> for u32x4[src]

impl Rem<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<u32> for u32x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[u32; 4]>> for u32x4[src]

impl RemAssign<u32> for u32x4[src]

impl Shl<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for u32x4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[u32; 4]>> for u32x4[src]

impl ShlAssign<u32> for u32x4[src]

impl Shr<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for u32x4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[u32; 4]>> for u32x4[src]

impl ShrAssign<u32> for u32x4[src]

impl Simd for u32x4[src]

type Element = u32

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+

impl Sub<Simd<[u32; 4]>> for u32x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<u32> for u32x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[u32; 4]>> for u32x4[src]

impl SubAssign<u32> for u32x4[src]

impl<'a> Sum<&'a Simd<[u32; 4]>> for u32x4[src]

impl Sum<Simd<[u32; 4]>> for u32x4[src]

impl UpperHex for u32x4[src]

\ No newline at end of file diff --git a/packed_simd/type.u32x8.html b/packed_simd/type.u32x8.html new file mode 100644 index 000000000..de99a3cb5 --- /dev/null +++ b/packed_simd/type.u32x8.html @@ -0,0 +1,247 @@ +packed_simd::u32x8 - Rust

Type Definition packed_simd::u32x8

type u32x8 = Simd<[u32; 8]>;

A 256-bit vector with 8 u32 lanes.

+

Implementations

impl u32x8[src]

pub const fn new(
    x0: u32,
    x1: u32,
    x2: u32,
    x3: u32,
    x4: u32,
    x5: u32,
    x6: u32,
    x7: u32
) -> Self
[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u32) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> u32[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u32[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub fn replace(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u32) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl u32x8[src]

pub fn rotate_left(self, n: u32x8) -> u32x8[src]

Shifts the bits of each lane to the left by the amount specified in the corresponding lane of n, wrapping the truncated bits around to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: u32x8) -> u32x8[src]

Shifts the bits of each lane to the right by the amount specified in the corresponding lane of n, wrapping the truncated bits around to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl u32x8[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl u32x8[src]

pub fn wrapping_sum(self) -> u32[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

pub fn wrapping_product(self) -> u32[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+

If an operation overflows, it returns the mathematical result modulo 2^n, where n is the number of bits of the element type.

+

impl u32x8[src]

pub fn max_element(self) -> u32[src]

Largest vector element value.

+

pub fn min_element(self) -> u32[src]

Smallest vector element value.

+

impl u32x8[src]

pub fn and(self) -> u32[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> u32[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> u32[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl u32x8[src]

pub fn from_slice_aligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u32]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u32x8[src]

pub fn write_to_slice_aligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u32])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u32x8[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl u32x8[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl u32x8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl u32x8[src]

pub fn eq(self, other: Self) -> m32x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m32x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m32x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m32x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m32x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m32x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl u32x8[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u32x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl u32x8[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u32x8>[src]

Returns a wrapper that implements Ord.

+

impl u32x8[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer lanes than the result type has bits, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<u32> for u32x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[u32; 8]>> for u32x8[src]

impl AddAssign<u32> for u32x8[src]

impl Binary for u32x8[src]

impl BitAnd<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<u32> for u32x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[u32; 8]>> for u32x8[src]

impl BitAndAssign<u32> for u32x8[src]

impl BitOr<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<u32> for u32x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[u32; 8]>> for u32x8[src]

impl BitOrAssign<u32> for u32x8[src]

impl BitXor<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<u32> for u32x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[u32; 8]>> for u32x8[src]

impl BitXorAssign<u32> for u32x8[src]

impl Debug for u32x8[src]

impl Default for u32x8[src]

impl Div<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<u32> for u32x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[u32; 8]>> for u32x8[src]

impl DivAssign<u32> for u32x8[src]

impl Eq for u32x8[src]

impl From<[u32; 8]> for u32x8[src]

impl From<Simd<[u16; 8]>> for u32x8[src]

impl From<Simd<[u8; 8]>> for u32x8[src]

impl FromBits<Simd<[f32; 8]>> for u32x8[src]

impl FromBits<Simd<[f64; 4]>> for u32x8[src]

impl FromBits<Simd<[i128; 2]>> for u32x8[src]

impl FromBits<Simd<[i16; 16]>> for u32x8[src]

impl FromBits<Simd<[i32; 8]>> for u32x8[src]

impl FromBits<Simd<[i64; 4]>> for u32x8[src]

impl FromBits<Simd<[i8; 32]>> for u32x8[src]

impl FromBits<Simd<[m128; 2]>> for u32x8[src]

impl FromBits<Simd<[m16; 16]>> for u32x8[src]

impl FromBits<Simd<[m32; 8]>> for u32x8[src]

impl FromBits<Simd<[m64; 4]>> for u32x8[src]

impl FromBits<Simd<[m8; 32]>> for u32x8[src]

impl FromBits<Simd<[u128; 2]>> for u32x8[src]

impl FromBits<Simd<[u16; 16]>> for u32x8[src]

impl FromBits<Simd<[u64; 4]>> for u32x8[src]

impl FromBits<Simd<[u8; 32]>> for u32x8[src]

impl FromBits<__m256> for u32x8[src]

impl FromBits<__m256d> for u32x8[src]

impl FromBits<__m256i> for u32x8[src]

impl FromCast<Simd<[f32; 8]>> for u32x8[src]

impl FromCast<Simd<[f64; 8]>> for u32x8[src]

impl FromCast<Simd<[i16; 8]>> for u32x8[src]

impl FromCast<Simd<[i32; 8]>> for u32x8[src]

impl FromCast<Simd<[i64; 8]>> for u32x8[src]

impl FromCast<Simd<[i8; 8]>> for u32x8[src]

impl FromCast<Simd<[isize; 8]>> for u32x8[src]

impl FromCast<Simd<[m16; 8]>> for u32x8[src]

impl FromCast<Simd<[m32; 8]>> for u32x8[src]

impl FromCast<Simd<[m64; 8]>> for u32x8[src]

impl FromCast<Simd<[m8; 8]>> for u32x8[src]

impl FromCast<Simd<[msize; 8]>> for u32x8[src]

impl FromCast<Simd<[u16; 8]>> for u32x8[src]

impl FromCast<Simd<[u64; 8]>> for u32x8[src]

impl FromCast<Simd<[u8; 8]>> for u32x8[src]

impl FromCast<Simd<[usize; 8]>> for u32x8[src]

impl Hash for u32x8[src]

impl LowerHex for u32x8[src]

impl Mul<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<u32> for u32x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[u32; 8]>> for u32x8[src]

impl MulAssign<u32> for u32x8[src]

impl Not for u32x8[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for u32x8[src]

impl PartialEq<Simd<[u32; 8]>> for u32x8[src]

impl<'a> Product<&'a Simd<[u32; 8]>> for u32x8[src]

impl Product<Simd<[u32; 8]>> for u32x8[src]

impl Rem<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<u32> for u32x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[u32; 8]>> for u32x8[src]

impl RemAssign<u32> for u32x8[src]

impl Shl<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for u32x8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[u32; 8]>> for u32x8[src]

impl ShlAssign<u32> for u32x8[src]

impl Shr<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for u32x8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[u32; 8]>> for u32x8[src]

impl ShrAssign<u32> for u32x8[src]

impl Simd for u32x8[src]

type Element = u32

Element type of the SIMD vector

+

type LanesType = [u32; 8]

The type: [u32; Self::N].

+

impl Sub<Simd<[u32; 8]>> for u32x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<u32> for u32x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[u32; 8]>> for u32x8[src]

impl SubAssign<u32> for u32x8[src]

impl<'a> Sum<&'a Simd<[u32; 8]>> for u32x8[src]

impl Sum<Simd<[u32; 8]>> for u32x8[src]

impl UpperHex for u32x8[src]

\ No newline at end of file diff --git a/packed_simd/type.u64x2.html b/packed_simd/type.u64x2.html new file mode 100644 index 000000000..f69667423 --- /dev/null +++ b/packed_simd/type.u64x2.html @@ -0,0 +1,251 @@ +packed_simd::u64x2 - Rust

Type Definition packed_simd::u64x2

type u64x2 = Simd<[u64; 2]>;

A 128-bit vector with 2 u64 lanes.

+

Implementations

impl u64x2[src]

pub const fn new(x0: u64, x1: u64) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u64) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> u64[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u64[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+
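A minimal sketch of how these constructors and accessors fit together, assuming the packed_simd crate is in scope:

use packed_simd::u64x2;

let v = u64x2::new(1, 2);
assert_eq!(v.extract(0), 1);
// `replace` returns a new vector; `v` itself is unchanged.
let w = v.replace(1, 42);
assert_eq!(w, u64x2::new(1, 42));
assert_eq!(v.extract(1), 2);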

impl u64x2[src]

pub fn rotate_left(self, n: u64x2) -> u64x2[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: u64x2) -> u64x2[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+
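For illustration, a sketch of per-lane rotation, where each lane is rotated by the corresponding lane of n:

use packed_simd::u64x2;

let x = u64x2::new(0b0001, 0b1000_0000);
let n = u64x2::new(1, 2);
// Lane 0: 0b0001 rotated left by 1; lane 1: 0b1000_0000 rotated left by 2.
assert_eq!(x.rotate_left(n), u64x2::new(0b0010, 0b10_0000_0000));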

impl u64x2[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl u64x2[src]

pub fn wrapping_sum(self) -> u64[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, the result is the mathematical value reduced modulo 2^b, where b is the bit width of the element type.

+

pub fn wrapping_product(self) -> u64[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, the result is the mathematical value reduced modulo 2^b, where b is the bit width of the element type.

+
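A concrete sketch of the wrapping behavior for the 2-lane case:

use packed_simd::u64x2;

// (2^64 - 1) + 2 overflows u64 once, so the sum wraps to 1.
assert_eq!(u64x2::new(u64::max_value(), 2).wrapping_sum(), 1);
assert_eq!(u64x2::new(3, 4).wrapping_product(), 12);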

impl u64x2[src]

pub fn max_element(self) -> u64[src]

Largest vector element value.

+

pub fn min_element(self) -> u64[src]

Smallest vector element value.

+

impl u64x2[src]

pub fn and(self) -> u64[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> u64[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> u64[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl u64x2[src]

pub fn from_slice_aligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u64x2[src]

pub fn write_to_slice_aligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
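A round-trip sketch of the slice constructors and writers; the unaligned variants only require slice.len() >= Self::lanes():

use packed_simd::u64x2;

let src = [1u64, 2, 3];
let v = u64x2::from_slice_unaligned(&src[1..]);
let mut dst = [0u64; 2];
v.write_to_slice_unaligned(&mut dst);
assert_eq!(dst, [2, 3]);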

impl u64x2[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+
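A sketch of the endianness helpers; swap_bytes is assumed here to operate lane-wise, like u64::swap_bytes:

use packed_simd::u64x2;

let v = u64x2::new(0x0102_0304_0506_0708, 0);
// Each lane's bytes are reversed.
assert_eq!(v.swap_bytes().extract(0), 0x0807_0605_0403_0201);
// Round-tripping through a fixed endianness is the identity on any target.
assert_eq!(u64x2::from_le(v.to_le()), v);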

impl u64x2[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl u64x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl u64x2[src]

pub fn eq(self, other: Self) -> m64x2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m64x2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m64x2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m64x2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m64x2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m64x2[src]

Lane-wise greater-than-or-equals comparison.

+
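Lane-wise comparisons return a mask vector (m64x2 here), which can then drive lane selection; the select method is assumed from the mask API:

use packed_simd::u64x2;

let a = u64x2::new(1, 5);
let b = u64x2::new(3, 3);
let m = a.lt(b); // lanes: [true, false]
// Pick from `a` where the mask is set, otherwise from `b`.
assert_eq!(m.select(a, b), u64x2::new(1, 3));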

impl u64x2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u64x2>[src]

Returns a wrapper that implements PartialOrd.

+

impl u64x2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u64x2>[src]

Returns a wrapper that implements Ord.

+

impl u64x2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+
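A sketch: lane i contributes bit i of the mask.

use packed_simd::u64x2;

// Lane 0 has its MSB set, lane 1 does not.
let v = u64x2::new(u64::max_value(), 0);
assert_eq!(v.bitmask(), 0b01);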

Trait Implementations

impl Add<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<u64> for u64x2[src]

type Output = Self

The resulting type after applying the + operator.

+
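The arithmetic traits come in vector-vector and vector-scalar forms, both element-wise; a minimal sketch:

use packed_simd::u64x2;

let v = u64x2::new(10, 20) + u64x2::splat(1);
assert_eq!(v, u64x2::new(11, 21));
// Scalar operands are applied to every lane.
assert_eq!(v * 2, u64x2::new(22, 42));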

impl AddAssign<Simd<[u64; 2]>> for u64x2[src]

impl AddAssign<u64> for u64x2[src]

impl Binary for u64x2[src]

impl BitAnd<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<u64> for u64x2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[u64; 2]>> for u64x2[src]

impl BitAndAssign<u64> for u64x2[src]

impl BitOr<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<u64> for u64x2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[u64; 2]>> for u64x2[src]

impl BitOrAssign<u64> for u64x2[src]

impl BitXor<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<u64> for u64x2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[u64; 2]>> for u64x2[src]

impl BitXorAssign<u64> for u64x2[src]

impl Debug for u64x2[src]

impl Default for u64x2[src]

impl Div<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<u64> for u64x2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[u64; 2]>> for u64x2[src]

impl DivAssign<u64> for u64x2[src]

impl Eq for u64x2[src]

impl From<[u64; 2]> for u64x2[src]

impl From<Simd<[u16; 2]>> for u64x2[src]

impl From<Simd<[u32; 2]>> for u64x2[src]

impl From<Simd<[u8; 2]>> for u64x2[src]

impl FromBits<Simd<[f32; 4]>> for u64x2[src]

impl FromBits<Simd<[f64; 2]>> for u64x2[src]

impl FromBits<Simd<[i128; 1]>> for u64x2[src]

impl FromBits<Simd<[i16; 8]>> for u64x2[src]

impl FromBits<Simd<[i32; 4]>> for u64x2[src]

impl FromBits<Simd<[i64; 2]>> for u64x2[src]

impl FromBits<Simd<[i8; 16]>> for u64x2[src]

impl FromBits<Simd<[m128; 1]>> for u64x2[src]

impl FromBits<Simd<[m16; 8]>> for u64x2[src]

impl FromBits<Simd<[m32; 4]>> for u64x2[src]

impl FromBits<Simd<[m64; 2]>> for u64x2[src]

impl FromBits<Simd<[m8; 16]>> for u64x2[src]

impl FromBits<Simd<[u128; 1]>> for u64x2[src]

impl FromBits<Simd<[u16; 8]>> for u64x2[src]

impl FromBits<Simd<[u32; 4]>> for u64x2[src]

impl FromBits<Simd<[u8; 16]>> for u64x2[src]

impl FromBits<__m128> for u64x2[src]

impl FromBits<__m128d> for u64x2[src]

impl FromBits<__m128i> for u64x2[src]

impl FromCast<Simd<[f32; 2]>> for u64x2[src]

impl FromCast<Simd<[f64; 2]>> for u64x2[src]

impl FromCast<Simd<[i128; 2]>> for u64x2[src]

impl FromCast<Simd<[i16; 2]>> for u64x2[src]

impl FromCast<Simd<[i32; 2]>> for u64x2[src]

impl FromCast<Simd<[i64; 2]>> for u64x2[src]

impl FromCast<Simd<[i8; 2]>> for u64x2[src]

impl FromCast<Simd<[isize; 2]>> for u64x2[src]

impl FromCast<Simd<[m128; 2]>> for u64x2[src]

impl FromCast<Simd<[m16; 2]>> for u64x2[src]

impl FromCast<Simd<[m32; 2]>> for u64x2[src]

impl FromCast<Simd<[m64; 2]>> for u64x2[src]

impl FromCast<Simd<[m8; 2]>> for u64x2[src]

impl FromCast<Simd<[msize; 2]>> for u64x2[src]

impl FromCast<Simd<[u128; 2]>> for u64x2[src]

impl FromCast<Simd<[u16; 2]>> for u64x2[src]

impl FromCast<Simd<[u32; 2]>> for u64x2[src]

impl FromCast<Simd<[u8; 2]>> for u64x2[src]

impl FromCast<Simd<[usize; 2]>> for u64x2[src]
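The distinction between the two conversion traits listed above: FromBits reinterprets the bits of a vector with the same total width, while FromCast converts lane-by-lane between vectors with the same lane count. A sketch, assuming both traits are in scope from the crate root:

use packed_simd::{i32x2, i64x2, u64x2, FromBits, FromCast};

// Same 128-bit layout, reinterpreted: -1i64 becomes u64::max_value().
let x = u64x2::from_bits(i64x2::new(-1, 0));
assert_eq!(x, u64x2::new(u64::max_value(), 0));
// Two lanes in, two lanes out: each i32 is converted to a u64.
let y = u64x2::from_cast(i32x2::new(3, 4));
assert_eq!(y, u64x2::new(3, 4));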

impl Hash for u64x2[src]

impl LowerHex for u64x2[src]

impl Mul<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<u64> for u64x2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[u64; 2]>> for u64x2[src]

impl MulAssign<u64> for u64x2[src]

impl Not for u64x2[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for u64x2[src]

impl PartialEq<Simd<[u64; 2]>> for u64x2[src]

impl<'a> Product<&'a Simd<[u64; 2]>> for u64x2[src]

impl Product<Simd<[u64; 2]>> for u64x2[src]

impl Rem<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<u64> for u64x2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[u64; 2]>> for u64x2[src]

impl RemAssign<u64> for u64x2[src]

impl Shl<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for u64x2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[u64; 2]>> for u64x2[src]

impl ShlAssign<u32> for u64x2[src]

impl Shr<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for u64x2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[u64; 2]>> for u64x2[src]

impl ShrAssign<u32> for u64x2[src]

impl Simd for u64x2[src]

type Element = u64

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+

impl Sub<Simd<[u64; 2]>> for u64x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<u64> for u64x2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[u64; 2]>> for u64x2[src]

impl SubAssign<u64> for u64x2[src]

impl<'a> Sum<&'a Simd<[u64; 2]>> for u64x2[src]

impl Sum<Simd<[u64; 2]>> for u64x2[src]

impl UpperHex for u64x2[src]

\ No newline at end of file
diff --git a/packed_simd/type.u64x4.html b/packed_simd/type.u64x4.html
new file mode 100644
index 000000000..9fedfeb3d
--- /dev/null
+++ b/packed_simd/type.u64x4.html
@@ -0,0 +1,251 @@
packed_simd::u64x4 - Rust

Type Definition packed_simd::u64x4[src]

type u64x4 = Simd<[u64; 4]>;

A 256-bit vector with 4 u64 lanes.

+

Implementations

impl u64x4[src]

pub const fn new(x0: u64, x1: u64, x2: u64, x3: u64) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u64) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> u64[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u64[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl u64x4[src]

pub fn rotate_left(self, n: u64x4) -> u64x4[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: u64x4) -> u64x4[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl u64x4[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+
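For example:

use packed_simd::u64x4;

let a = u64x4::new(1, 9, 3, 7);
let b = u64x4::new(4, 2, 8, 6);
// The lane-wise minima and maxima of the two inputs.
assert_eq!(a.min(b), u64x4::new(1, 2, 3, 6));
assert_eq!(a.max(b), u64x4::new(4, 9, 8, 7));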

impl u64x4[src]

pub fn wrapping_sum(self) -> u64[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, the result is the mathematical value reduced modulo 2^b, where b is the bit width of the element type.

+

pub fn wrapping_product(self) -> u64[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, the result is the mathematical value reduced modulo 2^b, where b is the bit width of the element type.

+

impl u64x4[src]

pub fn max_element(self) -> u64[src]

Largest vector element value.

+

pub fn min_element(self) -> u64[src]

Smallest vector element value.

+

impl u64x4[src]

pub fn and(self) -> u64[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> u64[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> u64[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl u64x4[src]

pub fn from_slice_aligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u64x4[src]

pub fn write_to_slice_aligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u64x4[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl u64x4[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl u64x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+
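A sketch of a dynamic lane reversal; the index vector is assumed here to be the same unsigned vector type as Self, per the Shuffle1Dyn bound:

use packed_simd::u64x4;

let v = u64x4::new(10, 20, 30, 40);
// Each output lane i takes the source lane named by indices[i].
let idx = u64x4::new(3, 2, 1, 0);
assert_eq!(v.shuffle1_dyn(idx), u64x4::new(40, 30, 20, 10));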

impl u64x4[src]

pub fn eq(self, other: Self) -> m64x4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m64x4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m64x4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m64x4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m64x4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m64x4[src]

Lane-wise greater-than-or-equals comparison.

+

impl u64x4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u64x4>[src]

Returns a wrapper that implements PartialOrd.

+

impl u64x4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u64x4>[src]

Returns a wrapper that implements Ord.

+

impl u64x4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

Trait Implementations

impl Add<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<u64> for u64x4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[u64; 4]>> for u64x4[src]

impl AddAssign<u64> for u64x4[src]

impl Binary for u64x4[src]

impl BitAnd<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<u64> for u64x4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[u64; 4]>> for u64x4[src]

impl BitAndAssign<u64> for u64x4[src]

impl BitOr<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<u64> for u64x4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[u64; 4]>> for u64x4[src]

impl BitOrAssign<u64> for u64x4[src]

impl BitXor<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<u64> for u64x4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[u64; 4]>> for u64x4[src]

impl BitXorAssign<u64> for u64x4[src]

impl Debug for u64x4[src]

impl Default for u64x4[src]

impl Div<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<u64> for u64x4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[u64; 4]>> for u64x4[src]

impl DivAssign<u64> for u64x4[src]

impl Eq for u64x4[src]

impl From<[u64; 4]> for u64x4[src]

impl From<Simd<[u16; 4]>> for u64x4[src]

impl From<Simd<[u32; 4]>> for u64x4[src]

impl From<Simd<[u8; 4]>> for u64x4[src]

impl FromBits<Simd<[f32; 8]>> for u64x4[src]

impl FromBits<Simd<[f64; 4]>> for u64x4[src]

impl FromBits<Simd<[i128; 2]>> for u64x4[src]

impl FromBits<Simd<[i16; 16]>> for u64x4[src]

impl FromBits<Simd<[i32; 8]>> for u64x4[src]

impl FromBits<Simd<[i64; 4]>> for u64x4[src]

impl FromBits<Simd<[i8; 32]>> for u64x4[src]

impl FromBits<Simd<[m128; 2]>> for u64x4[src]

impl FromBits<Simd<[m16; 16]>> for u64x4[src]

impl FromBits<Simd<[m32; 8]>> for u64x4[src]

impl FromBits<Simd<[m64; 4]>> for u64x4[src]

impl FromBits<Simd<[m8; 32]>> for u64x4[src]

impl FromBits<Simd<[u128; 2]>> for u64x4[src]

impl FromBits<Simd<[u16; 16]>> for u64x4[src]

impl FromBits<Simd<[u32; 8]>> for u64x4[src]

impl FromBits<Simd<[u8; 32]>> for u64x4[src]

impl FromBits<__m256> for u64x4[src]

impl FromBits<__m256d> for u64x4[src]

impl FromBits<__m256i> for u64x4[src]

impl FromCast<Simd<[f32; 4]>> for u64x4[src]

impl FromCast<Simd<[f64; 4]>> for u64x4[src]

impl FromCast<Simd<[i128; 4]>> for u64x4[src]

impl FromCast<Simd<[i16; 4]>> for u64x4[src]

impl FromCast<Simd<[i32; 4]>> for u64x4[src]

impl FromCast<Simd<[i64; 4]>> for u64x4[src]

impl FromCast<Simd<[i8; 4]>> for u64x4[src]

impl FromCast<Simd<[isize; 4]>> for u64x4[src]

impl FromCast<Simd<[m128; 4]>> for u64x4[src]

impl FromCast<Simd<[m16; 4]>> for u64x4[src]

impl FromCast<Simd<[m32; 4]>> for u64x4[src]

impl FromCast<Simd<[m64; 4]>> for u64x4[src]

impl FromCast<Simd<[m8; 4]>> for u64x4[src]

impl FromCast<Simd<[msize; 4]>> for u64x4[src]

impl FromCast<Simd<[u128; 4]>> for u64x4[src]

impl FromCast<Simd<[u16; 4]>> for u64x4[src]

impl FromCast<Simd<[u32; 4]>> for u64x4[src]

impl FromCast<Simd<[u8; 4]>> for u64x4[src]

impl FromCast<Simd<[usize; 4]>> for u64x4[src]

impl Hash for u64x4[src]

impl LowerHex for u64x4[src]

impl Mul<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<u64> for u64x4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[u64; 4]>> for u64x4[src]

impl MulAssign<u64> for u64x4[src]

impl Not for u64x4[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for u64x4[src]

impl PartialEq<Simd<[u64; 4]>> for u64x4[src]

impl<'a> Product<&'a Simd<[u64; 4]>> for u64x4[src]

impl Product<Simd<[u64; 4]>> for u64x4[src]

impl Rem<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<u64> for u64x4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[u64; 4]>> for u64x4[src]

impl RemAssign<u64> for u64x4[src]

impl Shl<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for u64x4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[u64; 4]>> for u64x4[src]

impl ShlAssign<u32> for u64x4[src]

impl Shr<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for u64x4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[u64; 4]>> for u64x4[src]

impl ShrAssign<u32> for u64x4[src]

impl Simd for u64x4[src]

type Element = u64

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+

impl Sub<Simd<[u64; 4]>> for u64x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<u64> for u64x4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[u64; 4]>> for u64x4[src]

impl SubAssign<u64> for u64x4[src]

impl<'a> Sum<&'a Simd<[u64; 4]>> for u64x4[src]

impl Sum<Simd<[u64; 4]>> for u64x4[src]

impl UpperHex for u64x4[src]

\ No newline at end of file
diff --git a/packed_simd/type.u64x8.html b/packed_simd/type.u64x8.html
new file mode 100644
index 000000000..3444a7fd1
--- /dev/null
+++ b/packed_simd/type.u64x8.html
@@ -0,0 +1,245 @@
packed_simd::u64x8 - Rust

Type Definition packed_simd::u64x8[src]

type u64x8 = Simd<[u64; 8]>;

A 512-bit vector with 8 u64 lanes.

+

Implementations

impl u64x8[src]

pub const fn new(
    x0: u64,
    x1: u64,
    x2: u64,
    x3: u64,
    x4: u64,
    x5: u64,
    x6: u64,
    x7: u64
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u64) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> u64[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u64[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u64) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl u64x8[src]

pub fn rotate_left(self, n: u64x8) -> u64x8[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: u64x8) -> u64x8[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl u64x8[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl u64x8[src]

pub fn wrapping_sum(self) -> u64[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, the result is the mathematical value reduced modulo 2^b, where b is the bit width of the element type.

+

pub fn wrapping_product(self) -> u64[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, the result is the mathematical value reduced modulo 2^b, where b is the bit width of the element type.

+

impl u64x8[src]

pub fn max_element(self) -> u64[src]

Largest vector element value.

+

pub fn min_element(self) -> u64[src]

Smallest vector element value.

+

impl u64x8[src]

pub fn and(self) -> u64[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> u64[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> u64[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl u64x8[src]

pub fn from_slice_aligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u64]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u64x8[src]

pub fn write_to_slice_aligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u64])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u64x8[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl u64x8[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+
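These bit-counting operations are computed per lane and returned as a vector; for example:

use packed_simd::u64x8;

let v = u64x8::splat(0b1011);
assert_eq!(v.count_ones(), u64x8::splat(3));
assert_eq!(v.trailing_zeros(), u64x8::splat(0));
// 0b1011 occupies 4 of the 64 bits, so 60 leading zeros.
assert_eq!(v.leading_zeros(), u64x8::splat(60));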

impl u64x8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl u64x8[src]

pub fn eq(self, other: Self) -> m64x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m64x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m64x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m64x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m64x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m64x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl u64x8[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u64x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl u64x8[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u64x8>[src]

Returns a wrapper that implements Ord.

+
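The wrappers make vectors totally orderable by comparing lanes lexicographically; a sketch:

use packed_simd::u64x8;

let a = u64x8::splat(1).replace(7, 0);
let b = u64x8::splat(1);
// Equal through lane 6; lane 7 decides: 0 < 1.
assert!(a.lex_ord() < b.lex_ord());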

impl u64x8[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+

Trait Implementations

impl Add<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<u64> for u64x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[u64; 8]>> for u64x8[src]

impl AddAssign<u64> for u64x8[src]

impl Binary for u64x8[src]

impl BitAnd<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<u64> for u64x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[u64; 8]>> for u64x8[src]

impl BitAndAssign<u64> for u64x8[src]

impl BitOr<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<u64> for u64x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[u64; 8]>> for u64x8[src]

impl BitOrAssign<u64> for u64x8[src]

impl BitXor<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<u64> for u64x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[u64; 8]>> for u64x8[src]

impl BitXorAssign<u64> for u64x8[src]

impl Debug for u64x8[src]

impl Default for u64x8[src]

impl Div<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<u64> for u64x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[u64; 8]>> for u64x8[src]

impl DivAssign<u64> for u64x8[src]

impl Eq for u64x8[src]

impl From<[u64; 8]> for u64x8[src]

impl From<Simd<[u16; 8]>> for u64x8[src]

impl From<Simd<[u32; 8]>> for u64x8[src]

impl From<Simd<[u8; 8]>> for u64x8[src]

impl FromBits<Simd<[f32; 16]>> for u64x8[src]

impl FromBits<Simd<[f64; 8]>> for u64x8[src]

impl FromBits<Simd<[i128; 4]>> for u64x8[src]

impl FromBits<Simd<[i16; 32]>> for u64x8[src]

impl FromBits<Simd<[i32; 16]>> for u64x8[src]

impl FromBits<Simd<[i64; 8]>> for u64x8[src]

impl FromBits<Simd<[i8; 64]>> for u64x8[src]

impl FromBits<Simd<[m128; 4]>> for u64x8[src]

impl FromBits<Simd<[m16; 32]>> for u64x8[src]

impl FromBits<Simd<[m32; 16]>> for u64x8[src]

impl FromBits<Simd<[m64; 8]>> for u64x8[src]

impl FromBits<Simd<[m8; 64]>> for u64x8[src]

impl FromBits<Simd<[u128; 4]>> for u64x8[src]

impl FromBits<Simd<[u16; 32]>> for u64x8[src]

impl FromBits<Simd<[u32; 16]>> for u64x8[src]

impl FromBits<Simd<[u8; 64]>> for u64x8[src]

impl FromCast<Simd<[f32; 8]>> for u64x8[src]

impl FromCast<Simd<[f64; 8]>> for u64x8[src]

impl FromCast<Simd<[i16; 8]>> for u64x8[src]

impl FromCast<Simd<[i32; 8]>> for u64x8[src]

impl FromCast<Simd<[i64; 8]>> for u64x8[src]

impl FromCast<Simd<[i8; 8]>> for u64x8[src]

impl FromCast<Simd<[isize; 8]>> for u64x8[src]

impl FromCast<Simd<[m16; 8]>> for u64x8[src]

impl FromCast<Simd<[m32; 8]>> for u64x8[src]

impl FromCast<Simd<[m64; 8]>> for u64x8[src]

impl FromCast<Simd<[m8; 8]>> for u64x8[src]

impl FromCast<Simd<[msize; 8]>> for u64x8[src]

impl FromCast<Simd<[u16; 8]>> for u64x8[src]

impl FromCast<Simd<[u32; 8]>> for u64x8[src]

impl FromCast<Simd<[u8; 8]>> for u64x8[src]

impl FromCast<Simd<[usize; 8]>> for u64x8[src]

impl Hash for u64x8[src]

impl LowerHex for u64x8[src]

impl Mul<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<u64> for u64x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[u64; 8]>> for u64x8[src]

impl MulAssign<u64> for u64x8[src]

impl Not for u64x8[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for u64x8[src]

impl PartialEq<Simd<[u64; 8]>> for u64x8[src]

impl<'a> Product<&'a Simd<[u64; 8]>> for u64x8[src]

impl Product<Simd<[u64; 8]>> for u64x8[src]

impl Rem<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<u64> for u64x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[u64; 8]>> for u64x8[src]

impl RemAssign<u64> for u64x8[src]

impl Shl<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for u64x8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[u64; 8]>> for u64x8[src]

impl ShlAssign<u32> for u64x8[src]

impl Shr<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for u64x8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[u64; 8]>> for u64x8[src]

impl ShrAssign<u32> for u64x8[src]

impl Simd for u64x8[src]

type Element = u64

Element type of the SIMD vector

+

type LanesType = [u32; 8]

The type: [u32; Self::N].

+

impl Sub<Simd<[u64; 8]>> for u64x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<u64> for u64x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[u64; 8]>> for u64x8[src]

impl SubAssign<u64> for u64x8[src]

impl<'a> Sum<&'a Simd<[u64; 8]>> for u64x8[src]

impl Sum<Simd<[u64; 8]>> for u64x8[src]

impl UpperHex for u64x8[src]

\ No newline at end of file
diff --git a/packed_simd/type.u8x16.html b/packed_simd/type.u8x16.html
new file mode 100644
index 000000000..d2095efa2
--- /dev/null
+++ b/packed_simd/type.u8x16.html
@@ -0,0 +1,238 @@
packed_simd::u8x16 - Rust

Type Definition packed_simd::u8x16[src]

type u8x16 = Simd<[u8; 16]>;

A 128-bit vector with 16 u8 lanes.

+

Implementations

impl u8x16[src]

pub const fn new(
    x0: u8,
    x1: u8,
    x2: u8,
    x3: u8,
    x4: u8,
    x5: u8,
    x6: u8,
    x7: u8,
    x8: u8,
    x9: u8,
    x10: u8,
    x11: u8,
    x12: u8,
    x13: u8,
    x14: u8,
    x15: u8
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl u8x16[src]

pub fn rotate_left(self, n: u8x16) -> u8x16[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: u8x16) -> u8x16[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl u8x16[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl u8x16[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, the result is the mathematical value reduced modulo 2^b, where b is the bit width of the element type.

+

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, the result is the mathematical value reduced modulo 2^b, where b is the bit width of the element type.

+

impl u8x16[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

+

pub fn min_element(self) -> u8[src]

Smallest vector element value.

+

impl u8x16[src]

pub fn and(self) -> u8[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> u8[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> u8[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl u8x16[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u8x16[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not +aligned to an align_of::<Self>() boundary, the behavior is +undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u8x16[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are +swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are +swapped.

+

impl u8x16[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of +the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of +the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary +representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary +representation of the lanes of self.

+

impl u8x16[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl u8x16[src]

pub fn eq(self, other: Self) -> m8x16[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m8x16[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m8x16[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m8x16[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m8x16[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m8x16[src]

Lane-wise greater-than-or-equals comparison.

+

impl u8x16[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x16>[src]

Returns a wrapper that implements PartialOrd.

+

impl u8x16[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x16>[src]

Returns a wrapper that implements Ord.

+

impl u8x16[src]

pub fn bitmask(self) -> u16[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lanes are cleared.

+
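With 16 lanes the bitmask is a u16, which makes comparison results cheap to scan; a sketch (the mask vector returned by eq is assumed to expose the same bitmask method):

use packed_simd::u8x16;

let hay = u8x16::from_slice_unaligned(b"needle in a hays");
let m = hay.eq(u8x16::splat(b' '));
// Spaces sit at byte offsets 6, 9, and 11.
assert_eq!(m.bitmask(), (1 << 6) | (1 << 9) | (1 << 11));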

Trait Implementations

impl Add<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<u8> for u8x16[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[u8; 16]>> for u8x16[src]

impl AddAssign<u8> for u8x16[src]

impl Binary for u8x16[src]

impl BitAnd<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<u8> for u8x16[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[u8; 16]>> for u8x16[src]

impl BitAndAssign<u8> for u8x16[src]

impl BitOr<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<u8> for u8x16[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[u8; 16]>> for u8x16[src]

impl BitOrAssign<u8> for u8x16[src]

impl BitXor<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<u8> for u8x16[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[u8; 16]>> for u8x16[src]

impl BitXorAssign<u8> for u8x16[src]

impl Debug for u8x16[src]

impl Default for u8x16[src]

impl Div<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<u8> for u8x16[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[u8; 16]>> for u8x16[src]

impl DivAssign<u8> for u8x16[src]

impl Eq for u8x16[src]

impl From<[u8; 16]> for u8x16[src]

impl FromBits<Simd<[f32; 4]>> for u8x16[src]

impl FromBits<Simd<[f64; 2]>> for u8x16[src]

impl FromBits<Simd<[i128; 1]>> for u8x16[src]

impl FromBits<Simd<[i16; 8]>> for u8x16[src]

impl FromBits<Simd<[i32; 4]>> for u8x16[src]

impl FromBits<Simd<[i64; 2]>> for u8x16[src]

impl FromBits<Simd<[i8; 16]>> for u8x16[src]

impl FromBits<Simd<[m128; 1]>> for u8x16[src]

impl FromBits<Simd<[m16; 8]>> for u8x16[src]

impl FromBits<Simd<[m32; 4]>> for u8x16[src]

impl FromBits<Simd<[m64; 2]>> for u8x16[src]

impl FromBits<Simd<[m8; 16]>> for u8x16[src]

impl FromBits<Simd<[u128; 1]>> for u8x16[src]

impl FromBits<Simd<[u16; 8]>> for u8x16[src]

impl FromBits<Simd<[u32; 4]>> for u8x16[src]

impl FromBits<Simd<[u64; 2]>> for u8x16[src]

impl FromBits<__m128> for u8x16[src]

impl FromBits<__m128d> for u8x16[src]

impl FromBits<__m128i> for u8x16[src]

impl FromCast<Simd<[f32; 16]>> for u8x16[src]

impl FromCast<Simd<[i16; 16]>> for u8x16[src]

impl FromCast<Simd<[i32; 16]>> for u8x16[src]

impl FromCast<Simd<[i8; 16]>> for u8x16[src]

impl FromCast<Simd<[m16; 16]>> for u8x16[src]

impl FromCast<Simd<[m32; 16]>> for u8x16[src]

impl FromCast<Simd<[m8; 16]>> for u8x16[src]

impl FromCast<Simd<[u16; 16]>> for u8x16[src]

impl FromCast<Simd<[u32; 16]>> for u8x16[src]

impl Hash for u8x16[src]

impl LowerHex for u8x16[src]

impl Mul<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<u8> for u8x16[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[u8; 16]>> for u8x16[src]

impl MulAssign<u8> for u8x16[src]

impl Not for u8x16[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for u8x16[src]

impl PartialEq<Simd<[u8; 16]>> for u8x16[src]

impl<'a> Product<&'a Simd<[u8; 16]>> for u8x16[src]

impl Product<Simd<[u8; 16]>> for u8x16[src]

impl Rem<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<u8> for u8x16[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[u8; 16]>> for u8x16[src]

impl RemAssign<u8> for u8x16[src]

impl Shl<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for u8x16[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[u8; 16]>> for u8x16[src]

impl ShlAssign<u32> for u8x16[src]

impl Shr<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for u8x16[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[u8; 16]>> for u8x16[src]

impl ShrAssign<u32> for u8x16[src]

impl Simd for u8x16[src]

type Element = u8

Element type of the SIMD vector

+

type LanesType = [u32; 16]

The type: [u32; Self::N].

+

impl Sub<Simd<[u8; 16]>> for u8x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<u8> for u8x16[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[u8; 16]>> for u8x16[src]

impl SubAssign<u8> for u8x16[src]

impl<'a> Sum<&'a Simd<[u8; 16]>> for u8x16[src]

impl Sum<Simd<[u8; 16]>> for u8x16[src]

impl UpperHex for u8x16[src]

\ No newline at end of file
diff --git a/packed_simd/type.u8x2.html b/packed_simd/type.u8x2.html
new file mode 100644
index 000000000..d17548143
--- /dev/null
+++ b/packed_simd/type.u8x2.html
@@ -0,0 +1,231 @@
packed_simd::u8x2 - Rust

Type Definition packed_simd::u8x2[src]

type u8x2 = Simd<[u8; 2]>;

A 16-bit vector with 2 u8 lanes.

+

Implementations

impl u8x2[src]

pub const fn new(x0: u8, x1: u8) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+
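
For orientation, a minimal sketch of how the constructors and accessors above compose; it only uses methods documented on this page:

use packed_simd::u8x2;

fn main() {
    let v = u8x2::new(10, 20);
    let s = u8x2::splat(7);

    assert_eq!(u8x2::lanes(), 2);
    assert_eq!(v.extract(0), 10);
    assert_eq!(s.extract(1), 7);

    // `replace` is not in-place: it returns a new vector.
    let w = v.replace(1, 99);
    assert_eq!(w, u8x2::new(10, 99));
    assert_eq!(v, u8x2::new(10, 20));
}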

impl u8x2[src]

pub fn rotate_left(self, n: u8x2) -> u8x2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u8x2) -> u8x2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.
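
A sketch of the per-lane rotation semantics (each lane's bits rotate by the amount in the corresponding lane of n):

use packed_simd::u8x2;

fn main() {
    let v = u8x2::new(0b1000_0001, 0b0000_0001);
    let n = u8x2::new(1, 4);

    // Lane 0 rotates by 1 bit, lane 1 by 4 bits.
    assert_eq!(v.rotate_left(n), u8x2::new(0b0000_0011, 0b0001_0000));
    assert_eq!(v.rotate_right(n), u8x2::new(0b1100_0000, 0b0001_0000));
}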

impl u8x2[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl u8x2[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps: what is returned is the mathematical result modulo 2^n, where n is the bit width of the element type (8 for u8).

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps: what is returned is the mathematical result modulo 2^n, where n is the bit width of the element type (8 for u8).
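
Since the element type is u8, both reductions wrap modulo 256; for example:

use packed_simd::u8x2;

fn main() {
    // 200 + 100 = 300, and 300 mod 256 = 44.
    assert_eq!(u8x2::new(200, 100).wrapping_sum(), 44);
    // 16 * 16 = 256, and 256 mod 256 = 0.
    assert_eq!(u8x2::new(16, 16).wrapping_product(), 0);
}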

impl u8x2[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

pub fn min_element(self) -> u8[src]

Smallest vector element value.

impl u8x2[src]

pub fn and(self) -> u8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl u8x2[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl u8x2[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl u8x2[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl u8x2[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl u8x2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl u8x2[src]

pub fn eq(self, other: Self) -> m8x2[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x2[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x2[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x2[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x2[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x2[src]

Lane-wise greater-than-or-equals comparison.

impl u8x2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x2>[src]

Returns a wrapper that implements PartialOrd.

impl u8x2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x2>[src]

Returns a wrapper that implements Ord.

impl u8x2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.
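
For a two-lane vector the mask occupies the two low bits of the returned u8; a minimal sketch:

use packed_simd::u8x2;

fn main() {
    // Lane 0 has its MSB set, lane 1 does not.
    let v = u8x2::new(0x80, 0x01);
    assert_eq!(v.bitmask(), 0b01);

    assert_eq!(u8x2::splat(0xFF).bitmask(), 0b11);
    assert_eq!(u8x2::splat(0x00).bitmask(), 0b00);
}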

Trait Implementations

impl Add<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<u8> for u8x2[src]

type Output = Self

The resulting type after applying the + operator.

impl AddAssign<Simd<[u8; 2]>> for u8x2[src]

impl AddAssign<u8> for u8x2[src]

impl Binary for u8x2[src]

impl BitAnd<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<u8> for u8x2[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAndAssign<Simd<[u8; 2]>> for u8x2[src]

impl BitAndAssign<u8> for u8x2[src]

impl BitOr<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<u8> for u8x2[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOrAssign<Simd<[u8; 2]>> for u8x2[src]

impl BitOrAssign<u8> for u8x2[src]

impl BitXor<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<u8> for u8x2[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXorAssign<Simd<[u8; 2]>> for u8x2[src]

impl BitXorAssign<u8> for u8x2[src]

impl Debug for u8x2[src]

impl Default for u8x2[src]

impl Div<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<u8> for u8x2[src]

type Output = Self

The resulting type after applying the / operator.

impl DivAssign<Simd<[u8; 2]>> for u8x2[src]

impl DivAssign<u8> for u8x2[src]

impl Eq for u8x2[src]

impl From<[u8; 2]> for u8x2[src]

impl FromBits<Simd<[i8; 2]>> for u8x2[src]

impl FromBits<Simd<[m8; 2]>> for u8x2[src]

impl FromCast<Simd<[f32; 2]>> for u8x2[src]

impl FromCast<Simd<[f64; 2]>> for u8x2[src]

impl FromCast<Simd<[i128; 2]>> for u8x2[src]

impl FromCast<Simd<[i16; 2]>> for u8x2[src]

impl FromCast<Simd<[i32; 2]>> for u8x2[src]

impl FromCast<Simd<[i64; 2]>> for u8x2[src]

impl FromCast<Simd<[i8; 2]>> for u8x2[src]

impl FromCast<Simd<[isize; 2]>> for u8x2[src]

impl FromCast<Simd<[m128; 2]>> for u8x2[src]

impl FromCast<Simd<[m16; 2]>> for u8x2[src]

impl FromCast<Simd<[m32; 2]>> for u8x2[src]

impl FromCast<Simd<[m64; 2]>> for u8x2[src]

impl FromCast<Simd<[m8; 2]>> for u8x2[src]

impl FromCast<Simd<[msize; 2]>> for u8x2[src]

impl FromCast<Simd<[u128; 2]>> for u8x2[src]

impl FromCast<Simd<[u16; 2]>> for u8x2[src]

impl FromCast<Simd<[u32; 2]>> for u8x2[src]

impl FromCast<Simd<[u64; 2]>> for u8x2[src]

impl FromCast<Simd<[usize; 2]>> for u8x2[src]

impl Hash for u8x2[src]

impl LowerHex for u8x2[src]

impl Mul<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<u8> for u8x2[src]

type Output = Self

The resulting type after applying the * operator.

impl MulAssign<Simd<[u8; 2]>> for u8x2[src]

impl MulAssign<u8> for u8x2[src]

impl Not for u8x2[src]

type Output = Self

The resulting type after applying the ! operator.

impl Octal for u8x2[src]

impl PartialEq<Simd<[u8; 2]>> for u8x2[src]

impl<'a> Product<&'a Simd<[u8; 2]>> for u8x2[src]

impl Product<Simd<[u8; 2]>> for u8x2[src]

impl Rem<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<u8> for u8x2[src]

type Output = Self

The resulting type after applying the % operator.

impl RemAssign<Simd<[u8; 2]>> for u8x2[src]

impl RemAssign<u8> for u8x2[src]

impl Shl<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<u32> for u8x2[src]

type Output = Self

The resulting type after applying the << operator.

impl ShlAssign<Simd<[u8; 2]>> for u8x2[src]

impl ShlAssign<u32> for u8x2[src]

impl Shr<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<u32> for u8x2[src]

type Output = Self

The resulting type after applying the >> operator.

impl ShrAssign<Simd<[u8; 2]>> for u8x2[src]

impl ShrAssign<u32> for u8x2[src]

impl Simd for u8x2[src]

type Element = u8

Element type of the SIMD vector

type LanesType = [u32; 2]

The type: [u32; Self::N].

impl Sub<Simd<[u8; 2]>> for u8x2[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<u8> for u8x2[src]

type Output = Self

The resulting type after applying the - operator.

impl SubAssign<Simd<[u8; 2]>> for u8x2[src]

impl SubAssign<u8> for u8x2[src]

impl<'a> Sum<&'a Simd<[u8; 2]>> for u8x2[src]

impl Sum<Simd<[u8; 2]>> for u8x2[src]

impl UpperHex for u8x2[src]

\ No newline at end of file diff --git a/packed_simd/type.u8x32.html b/packed_simd/type.u8x32.html new file mode 100644 index 000000000..e031f40eb --- /dev/null +++ b/packed_simd/type.u8x32.html @@ -0,0 +1,234 @@ +packed_simd::u8x32 - Rust

Type Definition packed_simd::u8x32

type u8x32 = Simd<[u8; 32]>;

A 256-bit vector with 32 u8 lanes.

Implementations

impl u8x32[src]

pub const fn new(
    x0: u8,
    x1: u8,
    x2: u8,
    x3: u8,
    x4: u8,
    x5: u8,
    x6: u8,
    x7: u8,
    x8: u8,
    x9: u8,
    x10: u8,
    x11: u8,
    x12: u8,
    x13: u8,
    x14: u8,
    x15: u8,
    x16: u8,
    x17: u8,
    x18: u8,
    x19: u8,
    x20: u8,
    x21: u8,
    x22: u8,
    x23: u8,
    x24: u8,
    x25: u8,
    x26: u8,
    x27: u8,
    x28: u8,
    x29: u8,
    x30: u8,
    x31: u8
) -> Self
[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl u8x32[src]

pub fn rotate_left(self, n: u8x32) -> u8x32[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u8x32) -> u8x32[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl u8x32[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl u8x32[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps: what is returned is the mathematical result modulo 2^n, where n is the bit width of the element type (8 for u8).

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps: what is returned is the mathematical result modulo 2^n, where n is the bit width of the element type (8 for u8).

impl u8x32[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

pub fn min_element(self) -> u8[src]

Smallest vector element value.

impl u8x32[src]

pub fn and(self) -> u8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.
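
A sketch of these horizontal reductions (each folds all 32 lanes into a single u8):

use packed_simd::u8x32;

fn main() {
    let v = u8x32::splat(0b1010_1010);

    assert_eq!(v.and(), 0b1010_1010);
    assert_eq!(v.or(), 0b1010_1010);
    // xor over an even number of identical lanes cancels to 0.
    assert_eq!(v.xor(), 0);
}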

impl u8x32[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl u8x32[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl u8x32[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl u8x32[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl u8x32[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl u8x32[src]

pub fn eq(self, other: Self) -> m8x32[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x32[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x32[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x32[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x32[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x32[src]

Lane-wise greater-than-or-equals comparison.

impl u8x32[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x32>[src]

Returns a wrapper that implements PartialOrd.

impl u8x32[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x32>[src]

Returns a wrapper that implements Ord.

impl u8x32[src]

pub fn bitmask(self) -> u32[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.
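
The vectors themselves only implement PartialEq/Eq, so ordering comparisons go through these wrappers; a sketch:

use packed_simd::u8x32;

fn main() {
    let a = u8x32::splat(1);
    let b = a.replace(31, 2); // differs from `a` only in the last lane

    // Lexicographic order: the first differing lane decides.
    assert!(a.lex_ord() < b.lex_ord());
    assert!(b.partial_lex_ord() > a.partial_lex_ord());
}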

Trait Implementations

impl Add<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<u8> for u8x32[src]

type Output = Self

The resulting type after applying the + operator.

impl AddAssign<Simd<[u8; 32]>> for u8x32[src]

impl AddAssign<u8> for u8x32[src]

impl Binary for u8x32[src]

impl BitAnd<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<u8> for u8x32[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAndAssign<Simd<[u8; 32]>> for u8x32[src]

impl BitAndAssign<u8> for u8x32[src]

impl BitOr<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<u8> for u8x32[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOrAssign<Simd<[u8; 32]>> for u8x32[src]

impl BitOrAssign<u8> for u8x32[src]

impl BitXor<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<u8> for u8x32[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXorAssign<Simd<[u8; 32]>> for u8x32[src]

impl BitXorAssign<u8> for u8x32[src]

impl Debug for u8x32[src]

impl Default for u8x32[src]

impl Div<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<u8> for u8x32[src]

type Output = Self

The resulting type after applying the / operator.

impl DivAssign<Simd<[u8; 32]>> for u8x32[src]

impl DivAssign<u8> for u8x32[src]

impl Eq for u8x32[src]

impl From<[u8; 32]> for u8x32[src]

impl FromBits<Simd<[f32; 8]>> for u8x32[src]

impl FromBits<Simd<[f64; 4]>> for u8x32[src]

impl FromBits<Simd<[i128; 2]>> for u8x32[src]

impl FromBits<Simd<[i16; 16]>> for u8x32[src]

impl FromBits<Simd<[i32; 8]>> for u8x32[src]

impl FromBits<Simd<[i64; 4]>> for u8x32[src]

impl FromBits<Simd<[i8; 32]>> for u8x32[src]

impl FromBits<Simd<[m128; 2]>> for u8x32[src]

impl FromBits<Simd<[m16; 16]>> for u8x32[src]

impl FromBits<Simd<[m32; 8]>> for u8x32[src]

impl FromBits<Simd<[m64; 4]>> for u8x32[src]

impl FromBits<Simd<[m8; 32]>> for u8x32[src]

impl FromBits<Simd<[u128; 2]>> for u8x32[src]

impl FromBits<Simd<[u16; 16]>> for u8x32[src]

impl FromBits<Simd<[u32; 8]>> for u8x32[src]

impl FromBits<Simd<[u64; 4]>> for u8x32[src]

impl FromBits<__m256> for u8x32[src]

impl FromBits<__m256d> for u8x32[src]

impl FromBits<__m256i> for u8x32[src]

impl FromCast<Simd<[i16; 32]>> for u8x32[src]

impl FromCast<Simd<[i8; 32]>> for u8x32[src]

impl FromCast<Simd<[m16; 32]>> for u8x32[src]

impl FromCast<Simd<[m8; 32]>> for u8x32[src]

impl FromCast<Simd<[u16; 32]>> for u8x32[src]

impl Hash for u8x32[src]

impl LowerHex for u8x32[src]

impl Mul<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<u8> for u8x32[src]

type Output = Self

The resulting type after applying the * operator.

impl MulAssign<Simd<[u8; 32]>> for u8x32[src]

impl MulAssign<u8> for u8x32[src]

impl Not for u8x32[src]

type Output = Self

The resulting type after applying the ! operator.

impl Octal for u8x32[src]

impl PartialEq<Simd<[u8; 32]>> for u8x32[src]

impl<'a> Product<&'a Simd<[u8; 32]>> for u8x32[src]

impl Product<Simd<[u8; 32]>> for u8x32[src]

impl Rem<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<u8> for u8x32[src]

type Output = Self

The resulting type after applying the % operator.

impl RemAssign<Simd<[u8; 32]>> for u8x32[src]

impl RemAssign<u8> for u8x32[src]

impl Shl<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<u32> for u8x32[src]

type Output = Self

The resulting type after applying the << operator.

impl ShlAssign<Simd<[u8; 32]>> for u8x32[src]

impl ShlAssign<u32> for u8x32[src]

impl Shr<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<u32> for u8x32[src]

type Output = Self

The resulting type after applying the >> operator.

impl ShrAssign<Simd<[u8; 32]>> for u8x32[src]

impl ShrAssign<u32> for u8x32[src]

impl Simd for u8x32[src]

type Element = u8

Element type of the SIMD vector

type LanesType = [u32; 32]

The type: [u32; Self::N].

impl Sub<Simd<[u8; 32]>> for u8x32[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<u8> for u8x32[src]

type Output = Self

The resulting type after applying the - operator.

impl SubAssign<Simd<[u8; 32]>> for u8x32[src]

impl SubAssign<u8> for u8x32[src]

impl<'a> Sum<&'a Simd<[u8; 32]>> for u8x32[src]

impl Sum<Simd<[u8; 32]>> for u8x32[src]

impl UpperHex for u8x32[src]

\ No newline at end of file diff --git a/packed_simd/type.u8x4.html b/packed_simd/type.u8x4.html new file mode 100644 index 000000000..8ac1df9a1 --- /dev/null +++ b/packed_simd/type.u8x4.html @@ -0,0 +1,234 @@ +packed_simd::u8x4 - Rust

Type Definition packed_simd::u8x4

type u8x4 = Simd<[u8; 4]>;

A 32-bit vector with 4 u8 lanes.

Implementations

impl u8x4[src]

pub const fn new(x0: u8, x1: u8, x2: u8, x3: u8) -> Self[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl u8x4[src]

pub fn rotate_left(self, n: u8x4) -> u8x4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u8x4) -> u8x4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl u8x4[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.
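
For example (lane-wise min/max, added for illustration):

use packed_simd::u8x4;

fn main() {
    let a = u8x4::new(1, 5, 3, 7);
    let b = u8x4::new(4, 2, 6, 0);

    assert_eq!(a.min(b), u8x4::new(1, 2, 3, 0));
    assert_eq!(a.max(b), u8x4::new(4, 5, 6, 7));
}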

impl u8x4[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps: what is returned is the mathematical result modulo 2^n, where n is the bit width of the element type (8 for u8).

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps: what is returned is the mathematical result modulo 2^n, where n is the bit width of the element type (8 for u8).

impl u8x4[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

pub fn min_element(self) -> u8[src]

Smallest vector element value.

impl u8x4[src]

pub fn and(self) -> u8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl u8x4[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl u8x4[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.
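
A round-trip sketch using the unaligned variants, which only require the slice to be long enough:

use packed_simd::u8x4;

fn main() {
    let bytes = [9u8, 8, 7, 6, 5];
    // Reads exactly Self::lanes() = 4 values from the front of the slice.
    let v = u8x4::from_slice_unaligned(&bytes);
    assert_eq!(v, u8x4::new(9, 8, 7, 6));

    let mut out = [0u8; 4];
    v.write_to_slice_unaligned(&mut out);
    assert_eq!(out, [9, 8, 7, 6]);
}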

impl u8x4[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl u8x4[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

impl u8x4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl u8x4[src]

pub fn eq(self, other: Self) -> m8x4[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x4[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x4[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x4[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x4[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x4[src]

Lane-wise greater-than-or-equals comparison.

impl u8x4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x4>[src]

Returns a wrapper that implements PartialOrd.

impl u8x4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x4>[src]

Returns a wrapper that implements Ord.

impl u8x4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

Trait Implementations

impl Add<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<u8> for u8x4[src]

type Output = Self

The resulting type after applying the + operator.

impl AddAssign<Simd<[u8; 4]>> for u8x4[src]

impl AddAssign<u8> for u8x4[src]

impl Binary for u8x4[src]

impl BitAnd<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<u8> for u8x4[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAndAssign<Simd<[u8; 4]>> for u8x4[src]

impl BitAndAssign<u8> for u8x4[src]

impl BitOr<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<u8> for u8x4[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOrAssign<Simd<[u8; 4]>> for u8x4[src]

impl BitOrAssign<u8> for u8x4[src]

impl BitXor<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<u8> for u8x4[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXorAssign<Simd<[u8; 4]>> for u8x4[src]

impl BitXorAssign<u8> for u8x4[src]

impl Debug for u8x4[src]

impl Default for u8x4[src]

impl Div<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<u8> for u8x4[src]

type Output = Self

The resulting type after applying the / operator.

impl DivAssign<Simd<[u8; 4]>> for u8x4[src]

impl DivAssign<u8> for u8x4[src]

impl Eq for u8x4[src]

impl From<[u8; 4]> for u8x4[src]

impl FromBits<Simd<[i16; 2]>> for u8x4[src]

impl FromBits<Simd<[i8; 4]>> for u8x4[src]

impl FromBits<Simd<[m16; 2]>> for u8x4[src]

impl FromBits<Simd<[m8; 4]>> for u8x4[src]

impl FromBits<Simd<[u16; 2]>> for u8x4[src]

impl FromCast<Simd<[f32; 4]>> for u8x4[src]

impl FromCast<Simd<[f64; 4]>> for u8x4[src]

impl FromCast<Simd<[i128; 4]>> for u8x4[src]

impl FromCast<Simd<[i16; 4]>> for u8x4[src]

impl FromCast<Simd<[i32; 4]>> for u8x4[src]

impl FromCast<Simd<[i64; 4]>> for u8x4[src]

impl FromCast<Simd<[i8; 4]>> for u8x4[src]

impl FromCast<Simd<[isize; 4]>> for u8x4[src]

impl FromCast<Simd<[m128; 4]>> for u8x4[src]

impl FromCast<Simd<[m16; 4]>> for u8x4[src]

impl FromCast<Simd<[m32; 4]>> for u8x4[src]

impl FromCast<Simd<[m64; 4]>> for u8x4[src]

impl FromCast<Simd<[m8; 4]>> for u8x4[src]

impl FromCast<Simd<[msize; 4]>> for u8x4[src]

impl FromCast<Simd<[u128; 4]>> for u8x4[src]

impl FromCast<Simd<[u16; 4]>> for u8x4[src]

impl FromCast<Simd<[u32; 4]>> for u8x4[src]

impl FromCast<Simd<[u64; 4]>> for u8x4[src]

impl FromCast<Simd<[usize; 4]>> for u8x4[src]

impl Hash for u8x4[src]

impl LowerHex for u8x4[src]

impl Mul<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<u8> for u8x4[src]

type Output = Self

The resulting type after applying the * operator.

impl MulAssign<Simd<[u8; 4]>> for u8x4[src]

impl MulAssign<u8> for u8x4[src]

impl Not for u8x4[src]

type Output = Self

The resulting type after applying the ! operator.

impl Octal for u8x4[src]

impl PartialEq<Simd<[u8; 4]>> for u8x4[src]

impl<'a> Product<&'a Simd<[u8; 4]>> for u8x4[src]

impl Product<Simd<[u8; 4]>> for u8x4[src]

impl Rem<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<u8> for u8x4[src]

type Output = Self

The resulting type after applying the % operator.

impl RemAssign<Simd<[u8; 4]>> for u8x4[src]

impl RemAssign<u8> for u8x4[src]

impl Shl<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<u32> for u8x4[src]

type Output = Self

The resulting type after applying the << operator.

impl ShlAssign<Simd<[u8; 4]>> for u8x4[src]

impl ShlAssign<u32> for u8x4[src]

impl Shr<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<u32> for u8x4[src]

type Output = Self

The resulting type after applying the >> operator.

impl ShrAssign<Simd<[u8; 4]>> for u8x4[src]

impl ShrAssign<u32> for u8x4[src]

impl Simd for u8x4[src]

type Element = u8

Element type of the SIMD vector

type LanesType = [u32; 4]

The type: [u32; Self::N].

impl Sub<Simd<[u8; 4]>> for u8x4[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<u8> for u8x4[src]

type Output = Self

The resulting type after applying the - operator.

impl SubAssign<Simd<[u8; 4]>> for u8x4[src]

impl SubAssign<u8> for u8x4[src]

impl<'a> Sum<&'a Simd<[u8; 4]>> for u8x4[src]

impl Sum<Simd<[u8; 4]>> for u8x4[src]

impl UpperHex for u8x4[src]

\ No newline at end of file diff --git a/packed_simd/type.u8x64.html b/packed_simd/type.u8x64.html new file mode 100644 index 000000000..c039706fc --- /dev/null +++ b/packed_simd/type.u8x64.html @@ -0,0 +1,228 @@ +packed_simd::u8x64 - Rust

Type Definition packed_simd::u8x64

type u8x64 = Simd<[u8; 64]>;

A 512-bit vector with 64 u8 lanes.

Implementations

impl u8x64[src]

pub const fn new(
    x0: u8,
    x1: u8,
    x2: u8,
    x3: u8,
    x4: u8,
    x5: u8,
    x6: u8,
    x7: u8,
    x8: u8,
    x9: u8,
    x10: u8,
    x11: u8,
    x12: u8,
    x13: u8,
    x14: u8,
    x15: u8,
    x16: u8,
    x17: u8,
    x18: u8,
    x19: u8,
    x20: u8,
    x21: u8,
    x22: u8,
    x23: u8,
    x24: u8,
    x25: u8,
    x26: u8,
    x27: u8,
    x28: u8,
    x29: u8,
    x30: u8,
    x31: u8,
    x32: u8,
    x33: u8,
    x34: u8,
    x35: u8,
    x36: u8,
    x37: u8,
    x38: u8,
    x39: u8,
    x40: u8,
    x41: u8,
    x42: u8,
    x43: u8,
    x44: u8,
    x45: u8,
    x46: u8,
    x47: u8,
    x48: u8,
    x49: u8,
    x50: u8,
    x51: u8,
    x52: u8,
    x53: u8,
    x54: u8,
    x55: u8,
    x56: u8,
    x57: u8,
    x58: u8,
    x59: u8,
    x60: u8,
    x61: u8,
    x62: u8,
    x63: u8
) -> Self
[src]

Creates a new instance with each vector element initialized to the corresponding provided value.

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to value.

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

Panics

If index >= Self::lanes().

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

Precondition

If index >= Self::lanes() the behavior is undefined.

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Panics

If index >= Self::lanes().

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"]
pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

Precondition

If index >= Self::lanes() the behavior is undefined.

impl u8x64[src]

pub fn rotate_left(self, n: u8x64) -> u8x64[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

pub fn rotate_right(self, n: u8x64) -> u8x64[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

impl u8x64[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

Returns a new vector containing the minimum value of each of the input vector lanes.

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

Returns a new vector containing the maximum value of each of the input vector lanes.

impl u8x64[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

If an operation overflows, the result wraps: what is returned is the mathematical result modulo 2^n, where n is the bit width of the element type (8 for u8).

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

If an operation overflows, the result wraps: what is returned is the mathematical result modulo 2^n, where n is the bit width of the element type (8 for u8).

impl u8x64[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

pub fn min_element(self) -> u8[src]

Smallest vector element value.

impl u8x64[src]

pub fn and(self) -> u8[src]

Lane-wise bitwise and of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn or(self) -> u8[src]

Lane-wise bitwise or of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

pub fn xor(self) -> u8[src]

Lane-wise bitwise xor of the vector elements.

Note: if the vector has one lane, the first element of the vector is returned.

impl u8x64[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl u8x64[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Panics

If slice.len() < Self::lanes().

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

Precondition

If slice.len() < Self::lanes() the behavior is undefined.

impl u8x64[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

On little endian this is a no-op. On big endian the bytes are swapped.

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

On big endian this is a no-op. On little endian the bytes are swapped.

impl u8x64[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.
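
These bit-counting methods apply per lane and return the counts as a vector; a sketch with a broadcast value:

use packed_simd::u8x64;

fn main() {
    let v = u8x64::splat(0b0000_1110);

    assert_eq!(v.count_ones(), u8x64::splat(3));
    assert_eq!(v.count_zeros(), u8x64::splat(5));
    assert_eq!(v.leading_zeros(), u8x64::splat(4));
    assert_eq!(v.trailing_zeros(), u8x64::splat(1));
}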

impl u8x64[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>,
[src]

Shuffle vector elements according to indices.

impl u8x64[src]

pub fn eq(self, other: Self) -> m8x64[src]

Lane-wise equality comparison.

pub fn ne(self, other: Self) -> m8x64[src]

Lane-wise inequality comparison.

pub fn lt(self, other: Self) -> m8x64[src]

Lane-wise less-than comparison.

pub fn le(self, other: Self) -> m8x64[src]

Lane-wise less-than-or-equals comparison.

pub fn gt(self, other: Self) -> m8x64[src]

Lane-wise greater-than comparison.

pub fn ge(self, other: Self) -> m8x64[src]

Lane-wise greater-than-or-equals comparison.

impl u8x64[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x64>[src]

Returns a wrapper that implements PartialOrd.

impl u8x64[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x64>[src]

Returns a wrapper that implements Ord.

impl u8x64[src]

pub fn bitmask(self) -> u64[src]

Creates a bitmask with the MSB of each vector lane.

If the vector has fewer lanes than the bitmask has bits, the bits that do not correspond to any vector lane are cleared.

Trait Implementations

impl Add<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the + operator.

impl Add<u8> for u8x64[src]

type Output = Self

The resulting type after applying the + operator.

impl AddAssign<Simd<[u8; 64]>> for u8x64[src]

impl AddAssign<u8> for u8x64[src]

impl Binary for u8x64[src]

impl BitAnd<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAnd<u8> for u8x64[src]

type Output = Self

The resulting type after applying the & operator.

impl BitAndAssign<Simd<[u8; 64]>> for u8x64[src]

impl BitAndAssign<u8> for u8x64[src]

impl BitOr<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOr<u8> for u8x64[src]

type Output = Self

The resulting type after applying the | operator.

impl BitOrAssign<Simd<[u8; 64]>> for u8x64[src]

impl BitOrAssign<u8> for u8x64[src]

impl BitXor<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXor<u8> for u8x64[src]

type Output = Self

The resulting type after applying the ^ operator.

impl BitXorAssign<Simd<[u8; 64]>> for u8x64[src]

impl BitXorAssign<u8> for u8x64[src]

impl Debug for u8x64[src]

impl Default for u8x64[src]

impl Div<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the / operator.

impl Div<u8> for u8x64[src]

type Output = Self

The resulting type after applying the / operator.

impl DivAssign<Simd<[u8; 64]>> for u8x64[src]

impl DivAssign<u8> for u8x64[src]

impl Eq for u8x64[src]

impl From<[u8; 64]> for u8x64[src]

impl FromBits<Simd<[f32; 16]>> for u8x64[src]

impl FromBits<Simd<[f64; 8]>> for u8x64[src]

impl FromBits<Simd<[i128; 4]>> for u8x64[src]

impl FromBits<Simd<[i16; 32]>> for u8x64[src]

impl FromBits<Simd<[i32; 16]>> for u8x64[src]

impl FromBits<Simd<[i64; 8]>> for u8x64[src]

impl FromBits<Simd<[i8; 64]>> for u8x64[src]

impl FromBits<Simd<[m128; 4]>> for u8x64[src]

impl FromBits<Simd<[m16; 32]>> for u8x64[src]

impl FromBits<Simd<[m32; 16]>> for u8x64[src]

impl FromBits<Simd<[m64; 8]>> for u8x64[src]

impl FromBits<Simd<[m8; 64]>> for u8x64[src]

impl FromBits<Simd<[u128; 4]>> for u8x64[src]

impl FromBits<Simd<[u16; 32]>> for u8x64[src]

impl FromBits<Simd<[u32; 16]>> for u8x64[src]

impl FromBits<Simd<[u64; 8]>> for u8x64[src]

impl FromCast<Simd<[i8; 64]>> for u8x64[src]

impl FromCast<Simd<[m8; 64]>> for u8x64[src]

impl Hash for u8x64[src]

impl LowerHex for u8x64[src]

impl Mul<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the * operator.

impl Mul<u8> for u8x64[src]

type Output = Self

The resulting type after applying the * operator.

impl MulAssign<Simd<[u8; 64]>> for u8x64[src]

impl MulAssign<u8> for u8x64[src]

impl Not for u8x64[src]

type Output = Self

The resulting type after applying the ! operator.

impl Octal for u8x64[src]

impl PartialEq<Simd<[u8; 64]>> for u8x64[src]

impl<'a> Product<&'a Simd<[u8; 64]>> for u8x64[src]

impl Product<Simd<[u8; 64]>> for u8x64[src]

impl Rem<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the % operator.

impl Rem<u8> for u8x64[src]

type Output = Self

The resulting type after applying the % operator.

impl RemAssign<Simd<[u8; 64]>> for u8x64[src]

impl RemAssign<u8> for u8x64[src]

impl Shl<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the << operator.

impl Shl<u32> for u8x64[src]

type Output = Self

The resulting type after applying the << operator.

impl ShlAssign<Simd<[u8; 64]>> for u8x64[src]

impl ShlAssign<u32> for u8x64[src]

impl Shr<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the >> operator.

impl Shr<u32> for u8x64[src]

type Output = Self

The resulting type after applying the >> operator.

impl ShrAssign<Simd<[u8; 64]>> for u8x64[src]

impl ShrAssign<u32> for u8x64[src]

impl Simd for u8x64[src]

type Element = u8

Element type of the SIMD vector

type LanesType = [u32; 64]

The type: [u32; Self::N].

impl Sub<Simd<[u8; 64]>> for u8x64[src]

type Output = Self

The resulting type after applying the - operator.

impl Sub<u8> for u8x64[src]

type Output = Self

The resulting type after applying the - operator.

impl SubAssign<Simd<[u8; 64]>> for u8x64[src]

impl SubAssign<u8> for u8x64[src]

impl<'a> Sum<&'a Simd<[u8; 64]>> for u8x64[src]

impl Sum<Simd<[u8; 64]>> for u8x64[src]

impl UpperHex for u8x64[src]

\ No newline at end of file diff --git a/packed_simd/type.u8x8.html b/packed_simd/type.u8x8.html new file mode 100644 index 000000000..55871a0ec --- /dev/null +++ b/packed_simd/type.u8x8.html @@ -0,0 +1,236 @@ +packed_simd::u8x8 - Rust

Type Definition packed_simd::u8x8

type u8x8 = Simd<[u8; 8]>;

A 64-bit vector with 8 u8 lanes.

Implementations

impl u8x8[src]

pub const fn new(
    x0: u8,
    x1: u8,
    x2: u8,
    x3: u8,
    x4: u8,
    x5: u8,
    x6: u8,
    x7: u8
) -> Self
[src]

Creates a new instance with each vector elements initialized +with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: u8) -> Self[src]

Constructs a new instance with each element initialized to +value.

+

pub fn extract(self, index: usize) -> u8[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> u8[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = + "replace does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub fn replace(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = + "replace_unchecked does not modify the original value - \ + it returns a new vector with the value at `index` \ + replaced by `new_value`d"]pub unsafe fn replace_unchecked(self, index: usize, new_value: u8) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl u8x8[src]

pub fn rotate_left(self, n: u8x8) -> u8x8[src]

Shifts the bits of each lane to the left by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

pub fn rotate_right(self, n: u8x8) -> u8x8[src]

Shifts the bits of each lane to the right by the specified +amount in the corresponding lane of n, wrapping the +truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent +to slice::rotate_left.

+

impl u8x8[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of +the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of +the input vector lanes.

+

impl u8x8[src]

pub fn wrapping_sum(self) -> u8[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows it returns the mathematical result +modulo 2^n where n is the number of times it overflows.

+

pub fn wrapping_product(self) -> u8[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. +That is, for an 8 element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows it returns the mathematical result +modulo 2^n where n is the number of times it overflows.

+

impl u8x8[src]

pub fn max_element(self) -> u8[src]

Largest vector element value.

+

pub fn min_element(self) -> u8[src]

Smallest vector element value.

+

impl u8x8[src]

pub fn and(self) -> u8[src]

Lane-wise bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn or(self) -> u8[src]

Lane-wise bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

pub fn xor(self) -> u8[src]

Lane-wise bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the +vector is returned.

+

impl u8x8[src]

pub fn from_slice_aligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned +to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[u8]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+
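A sketch of the unaligned constructor, which carries only the length precondition (assuming the packed_simd crate is in scope):

use packed_simd::u8x8;

fn main() {
    let data = [10u8, 11, 12, 13, 14, 15, 16, 17, 18];
    // Only `data.len() >= u8x8::lanes()` is required here.
    let v = u8x8::from_slice_unaligned(&data);
    assert_eq!(v.extract(7), 17);
}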

impl u8x8[src]

pub fn write_to_slice_aligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [u8])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl u8x8[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+
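Round-tripping through a fixed endianness is the identity on every target; a minimal sketch:

use packed_simd::u8x8;

fn main() {
    let v = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
    // Whatever the target's byte order, encode-then-decode is a no-op.
    assert_eq!(u8x8::from_le(v.to_le()), v);
    assert_eq!(u8x8::from_be(v.to_be()), v);
}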

impl u8x8[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl u8x8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+
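A sketch of a dynamic shuffle; it assumes Shuffle1Dyn is implemented for u8x8 with u8x8 indices, as the bound above suggests:

use packed_simd::u8x8;

fn main() {
    let v = u8x8::new(10, 11, 12, 13, 14, 15, 16, 17);
    // Reverse the lanes with a runtime index vector.
    let idx = u8x8::new(7, 6, 5, 4, 3, 2, 1, 0);
    assert_eq!(v.shuffle1_dyn(idx), u8x8::new(17, 16, 15, 14, 13, 12, 11, 10));
}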

impl u8x8[src]

pub fn eq(self, other: Self) -> m8x8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> m8x8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> m8x8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> m8x8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> m8x8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> m8x8[src]

Lane-wise greater-than-or-equals comparison.

+

impl u8x8[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<u8x8>[src]

Returns a wrapper that implements PartialOrd.

+

impl u8x8[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<u8x8>[src]

Returns a wrapper that implements Ord.

+

impl u8x8[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+
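For example, assuming the usual lane-0-to-bit-0 ordering, bit i of the mask mirrors the most significant bit of lane i:

use packed_simd::u8x8;

fn main() {
    let v = u8x8::new(0x80, 0, 0x80, 0, 0x80, 0, 0x80, 0);
    // Lanes 0, 2, 4 and 6 have their MSB set.
    assert_eq!(v.bitmask(), 0b0101_0101);
}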

Trait Implementations

impl Add<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<u8> for u8x8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[u8; 8]>> for u8x8[src]

impl AddAssign<u8> for u8x8[src]

impl Binary for u8x8[src]

impl BitAnd<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<u8> for u8x8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[u8; 8]>> for u8x8[src]

impl BitAndAssign<u8> for u8x8[src]

impl BitOr<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<u8> for u8x8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[u8; 8]>> for u8x8[src]

impl BitOrAssign<u8> for u8x8[src]

impl BitXor<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<u8> for u8x8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[u8; 8]>> for u8x8[src]

impl BitXorAssign<u8> for u8x8[src]

impl Debug for u8x8[src]

impl Default for u8x8[src]

impl Div<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<u8> for u8x8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[u8; 8]>> for u8x8[src]

impl DivAssign<u8> for u8x8[src]

impl Eq for u8x8[src]

impl From<[u8; 8]> for u8x8[src]

impl FromBits<Simd<[f32; 2]>> for u8x8[src]

impl FromBits<Simd<[i16; 4]>> for u8x8[src]

impl FromBits<Simd<[i32; 2]>> for u8x8[src]

impl FromBits<Simd<[i8; 8]>> for u8x8[src]

impl FromBits<Simd<[m16; 4]>> for u8x8[src]

impl FromBits<Simd<[m32; 2]>> for u8x8[src]

impl FromBits<Simd<[m8; 8]>> for u8x8[src]

impl FromBits<Simd<[u16; 4]>> for u8x8[src]

impl FromBits<Simd<[u32; 2]>> for u8x8[src]

impl FromBits<__m64> for u8x8[src]

impl FromCast<Simd<[f32; 8]>> for u8x8[src]

impl FromCast<Simd<[f64; 8]>> for u8x8[src]

impl FromCast<Simd<[i16; 8]>> for u8x8[src]

impl FromCast<Simd<[i32; 8]>> for u8x8[src]

impl FromCast<Simd<[i64; 8]>> for u8x8[src]

impl FromCast<Simd<[i8; 8]>> for u8x8[src]

impl FromCast<Simd<[isize; 8]>> for u8x8[src]

impl FromCast<Simd<[m16; 8]>> for u8x8[src]

impl FromCast<Simd<[m32; 8]>> for u8x8[src]

impl FromCast<Simd<[m64; 8]>> for u8x8[src]

impl FromCast<Simd<[m8; 8]>> for u8x8[src]

impl FromCast<Simd<[msize; 8]>> for u8x8[src]

impl FromCast<Simd<[u16; 8]>> for u8x8[src]

impl FromCast<Simd<[u32; 8]>> for u8x8[src]

impl FromCast<Simd<[u64; 8]>> for u8x8[src]

impl FromCast<Simd<[usize; 8]>> for u8x8[src]

impl Hash for u8x8[src]

impl LowerHex for u8x8[src]

impl Mul<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<u8> for u8x8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[u8; 8]>> for u8x8[src]

impl MulAssign<u8> for u8x8[src]

impl Not for u8x8[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for u8x8[src]

impl PartialEq<Simd<[u8; 8]>> for u8x8[src]

impl<'a> Product<&'a Simd<[u8; 8]>> for u8x8[src]

impl Product<Simd<[u8; 8]>> for u8x8[src]

impl Rem<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<u8> for u8x8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[u8; 8]>> for u8x8[src]

impl RemAssign<u8> for u8x8[src]

impl Shl<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for u8x8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[u8; 8]>> for u8x8[src]

impl ShlAssign<u32> for u8x8[src]

impl Shr<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for u8x8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[u8; 8]>> for u8x8[src]

impl ShrAssign<u32> for u8x8[src]

impl Simd for u8x8[src]

type Element = u8

Element type of the SIMD vector

+

type LanesType = [u32; 8]

The type: [u32; Self::N].

+

impl Sub<Simd<[u8; 8]>> for u8x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<u8> for u8x8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[u8; 8]>> for u8x8[src]

impl SubAssign<u8> for u8x8[src]

impl<'a> Sum<&'a Simd<[u8; 8]>> for u8x8[src]

impl Sum<Simd<[u8; 8]>> for u8x8[src]

impl UpperHex for u8x8[src]

\ No newline at end of file diff --git a/packed_simd/type.usizex2.html b/packed_simd/type.usizex2.html new file mode 100644 index 000000000..84dc590fb --- /dev/null +++ b/packed_simd/type.usizex2.html @@ -0,0 +1,229 @@ +packed_simd::usizex2 - Rust

[][src]Type Definition packed_simd::usizex2

type usizex2 = Simd<[usize; 2]>;

A vector with 2 usize lanes.

+

Implementations

impl usizex2[src]

pub const fn new(x0: usize, x1: usize) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: usize) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> usize[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> usize[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl usizex2[src]

pub fn rotate_left(self, n: usizex2) -> usizex2[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: usizex2) -> usizex2[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl usizex2[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl usizex2[src]

pub fn wrapping_sum(self) -> usize[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the element type.

+

pub fn wrapping_product(self) -> usize[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the element type.

+

impl usizex2[src]

pub fn max_element(self) -> usize[src]

Largest vector element value.

+

pub fn min_element(self) -> usize[src]

Smallest vector element value.

+

impl usizex2[src]

pub fn and(self) -> usize[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> usize[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> usize[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl usizex2[src]

pub fn from_slice_aligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl usizex2[src]

pub fn write_to_slice_aligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl usizex2[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl usizex2[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl usizex2[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl usizex2[src]

pub fn eq(self, other: Self) -> msizex2[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex2[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex2[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex2[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex2[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex2[src]

Lane-wise greater-than-or-equals comparison.

+
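The comparisons return a mask vector; a sketch that additionally assumes packed_simd's mask select method for mask-driven blending:

use packed_simd::usizex2;

fn main() {
    let a = usizex2::new(1, 20);
    let b = usizex2::new(10, 2);
    // `lt` yields [true, false]; `select` keeps `a` where the mask is set.
    let smaller = a.lt(b).select(a, b);
    assert_eq!(smaller, usizex2::new(1, 2));
}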

impl usizex2[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<usizex2>[src]

Returns a wrapper that implements PartialOrd.

+

impl usizex2[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<usizex2>[src]

Returns a wrapper that implements Ord.

+
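For example, the wrappers compare lexicographically, lane 0 first:

use packed_simd::usizex2;

fn main() {
    let a = usizex2::new(1, 9);
    let b = usizex2::new(2, 0);
    // Lane 0 decides: 1 < 2, so `a` orders before `b`.
    assert!(a.partial_lex_ord() < b.partial_lex_ord());
}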

impl usizex2[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<usize> for usizex2[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[usize; 2]>> for usizex2[src]

impl AddAssign<usize> for usizex2[src]

impl Binary for usizex2[src]

impl BitAnd<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<usize> for usizex2[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[usize; 2]>> for usizex2[src]

impl BitAndAssign<usize> for usizex2[src]

impl BitOr<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<usize> for usizex2[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[usize; 2]>> for usizex2[src]

impl BitOrAssign<usize> for usizex2[src]

impl BitXor<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<usize> for usizex2[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[usize; 2]>> for usizex2[src]

impl BitXorAssign<usize> for usizex2[src]

impl Debug for usizex2[src]

impl Default for usizex2[src]

impl Div<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<usize> for usizex2[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[usize; 2]>> for usizex2[src]

impl DivAssign<usize> for usizex2[src]

impl Eq for usizex2[src]

impl From<[usize; 2]> for usizex2[src]

impl FromCast<Simd<[f32; 2]>> for usizex2[src]

impl FromCast<Simd<[f64; 2]>> for usizex2[src]

impl FromCast<Simd<[i128; 2]>> for usizex2[src]

impl FromCast<Simd<[i16; 2]>> for usizex2[src]

impl FromCast<Simd<[i32; 2]>> for usizex2[src]

impl FromCast<Simd<[i64; 2]>> for usizex2[src]

impl FromCast<Simd<[i8; 2]>> for usizex2[src]

impl FromCast<Simd<[isize; 2]>> for usizex2[src]

impl FromCast<Simd<[m128; 2]>> for usizex2[src]

impl FromCast<Simd<[m16; 2]>> for usizex2[src]

impl FromCast<Simd<[m32; 2]>> for usizex2[src]

impl FromCast<Simd<[m64; 2]>> for usizex2[src]

impl FromCast<Simd<[m8; 2]>> for usizex2[src]

impl FromCast<Simd<[msize; 2]>> for usizex2[src]

impl FromCast<Simd<[u128; 2]>> for usizex2[src]

impl FromCast<Simd<[u16; 2]>> for usizex2[src]

impl FromCast<Simd<[u32; 2]>> for usizex2[src]

impl FromCast<Simd<[u64; 2]>> for usizex2[src]

impl FromCast<Simd<[u8; 2]>> for usizex2[src]

impl Hash for usizex2[src]

impl LowerHex for usizex2[src]

impl Mul<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<usize> for usizex2[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[usize; 2]>> for usizex2[src]

impl MulAssign<usize> for usizex2[src]

impl Not for usizex2[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for usizex2[src]

impl PartialEq<Simd<[usize; 2]>> for usizex2[src]

impl<'a> Product<&'a Simd<[usize; 2]>> for usizex2[src]

impl Product<Simd<[usize; 2]>> for usizex2[src]

impl Rem<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<usize> for usizex2[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[usize; 2]>> for usizex2[src]

impl RemAssign<usize> for usizex2[src]

impl Shl<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for usizex2[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[usize; 2]>> for usizex2[src]

impl ShlAssign<u32> for usizex2[src]

impl Shr<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for usizex2[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[usize; 2]>> for usizex2[src]

impl ShrAssign<u32> for usizex2[src]

impl Simd for usizex2[src]

type Element = usize

Element type of the SIMD vector

+

type LanesType = [u32; 2]

The type: [u32; Self::N].

+

impl Sub<Simd<[usize; 2]>> for usizex2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<usize> for usizex2[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[usize; 2]>> for usizex2[src]

impl SubAssign<usize> for usizex2[src]

impl<'a> Sum<&'a Simd<[usize; 2]>> for usizex2[src]

impl Sum<Simd<[usize; 2]>> for usizex2[src]

impl UpperHex for usizex2[src]

\ No newline at end of file diff --git a/packed_simd/type.usizex4.html b/packed_simd/type.usizex4.html new file mode 100644 index 000000000..cf63b4503 --- /dev/null +++ b/packed_simd/type.usizex4.html @@ -0,0 +1,229 @@ +packed_simd::usizex4 - Rust

[][src]Type Definition packed_simd::usizex4

type usizex4 = Simd<[usize; 4]>;

A vector with 4 usize lanes.

+

Implementations

impl usizex4[src]

pub const fn new(x0: usize, x1: usize, x2: usize, x3: usize) -> Self[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: usize) -> Self[src]

Constructs a new instance with each element initialized to value.

+

pub fn extract(self, index: usize) -> usize[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> usize[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl usizex4[src]

pub fn rotate_left(self, n: usizex4) -> usizex4[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: usizex4) -> usizex4[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl usizex4[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+
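For example:

use packed_simd::usizex4;

fn main() {
    let a = usizex4::new(1, 8, 3, 6);
    let b = usizex4::new(4, 5, 2, 7);
    // Lane-wise minima and maxima.
    assert_eq!(a.min(b), usizex4::new(1, 5, 2, 6));
    assert_eq!(a.max(b), usizex4::new(4, 8, 3, 7));
}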

impl usizex4[src]

pub fn wrapping_sum(self) -> usize[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the element type.

+

pub fn wrapping_product(self) -> usize[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the element type.

+

impl usizex4[src]

pub fn max_element(self) -> usize[src]

Largest vector element value.

+

pub fn min_element(self) -> usize[src]

Smallest vector element value.

+
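For example:

use packed_simd::usizex4;

fn main() {
    let v = usizex4::new(3, 9, 1, 7);
    // Horizontal reductions to a single scalar.
    assert_eq!(v.max_element(), 9);
    assert_eq!(v.min_element(), 1);
}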

impl usizex4[src]

pub fn and(self) -> usize[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> usize[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> usize[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

impl usizex4[src]

pub fn from_slice_aligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl usizex4[src]

pub fn write_to_slice_aligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl usizex4[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl usizex4[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+

impl usizex4[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl usizex4[src]

pub fn eq(self, other: Self) -> msizex4[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex4[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex4[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex4[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex4[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex4[src]

Lane-wise greater-than-or-equals comparison.

+

impl usizex4[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<usizex4>[src]

Returns a wrapper that implements PartialOrd.

+

impl usizex4[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<usizex4>[src]

Returns a wrapper that implements Ord.

+

impl usizex4[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<usize> for usizex4[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[usize; 4]>> for usizex4[src]

impl AddAssign<usize> for usizex4[src]

impl Binary for usizex4[src]

impl BitAnd<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<usize> for usizex4[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[usize; 4]>> for usizex4[src]

impl BitAndAssign<usize> for usizex4[src]

impl BitOr<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<usize> for usizex4[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[usize; 4]>> for usizex4[src]

impl BitOrAssign<usize> for usizex4[src]

impl BitXor<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<usize> for usizex4[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[usize; 4]>> for usizex4[src]

impl BitXorAssign<usize> for usizex4[src]

impl Debug for usizex4[src]

impl Default for usizex4[src]

impl Div<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<usize> for usizex4[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[usize; 4]>> for usizex4[src]

impl DivAssign<usize> for usizex4[src]

impl Eq for usizex4[src]

impl From<[usize; 4]> for usizex4[src]

impl FromCast<Simd<[f32; 4]>> for usizex4[src]

impl FromCast<Simd<[f64; 4]>> for usizex4[src]

impl FromCast<Simd<[i128; 4]>> for usizex4[src]

impl FromCast<Simd<[i16; 4]>> for usizex4[src]

impl FromCast<Simd<[i32; 4]>> for usizex4[src]

impl FromCast<Simd<[i64; 4]>> for usizex4[src]

impl FromCast<Simd<[i8; 4]>> for usizex4[src]

impl FromCast<Simd<[isize; 4]>> for usizex4[src]

impl FromCast<Simd<[m128; 4]>> for usizex4[src]

impl FromCast<Simd<[m16; 4]>> for usizex4[src]

impl FromCast<Simd<[m32; 4]>> for usizex4[src]

impl FromCast<Simd<[m64; 4]>> for usizex4[src]

impl FromCast<Simd<[m8; 4]>> for usizex4[src]

impl FromCast<Simd<[msize; 4]>> for usizex4[src]

impl FromCast<Simd<[u128; 4]>> for usizex4[src]

impl FromCast<Simd<[u16; 4]>> for usizex4[src]

impl FromCast<Simd<[u32; 4]>> for usizex4[src]

impl FromCast<Simd<[u64; 4]>> for usizex4[src]

impl FromCast<Simd<[u8; 4]>> for usizex4[src]

impl Hash for usizex4[src]

impl LowerHex for usizex4[src]

impl Mul<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<usize> for usizex4[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[usize; 4]>> for usizex4[src]

impl MulAssign<usize> for usizex4[src]

impl Not for usizex4[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for usizex4[src]

impl PartialEq<Simd<[usize; 4]>> for usizex4[src]

impl<'a> Product<&'a Simd<[usize; 4]>> for usizex4[src]

impl Product<Simd<[usize; 4]>> for usizex4[src]

impl Rem<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<usize> for usizex4[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[usize; 4]>> for usizex4[src]

impl RemAssign<usize> for usizex4[src]

impl Shl<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for usizex4[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[usize; 4]>> for usizex4[src]

impl ShlAssign<u32> for usizex4[src]

impl Shr<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for usizex4[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[usize; 4]>> for usizex4[src]

impl ShrAssign<u32> for usizex4[src]

impl Simd for usizex4[src]

type Element = usize

Element type of the SIMD vector

+

type LanesType = [u32; 4]

The type: [u32; Self::N].

+

impl Sub<Simd<[usize; 4]>> for usizex4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<usize> for usizex4[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[usize; 4]>> for usizex4[src]

impl SubAssign<usize> for usizex4[src]

impl<'a> Sum<&'a Simd<[usize; 4]>> for usizex4[src]

impl Sum<Simd<[usize; 4]>> for usizex4[src]

impl UpperHex for usizex4[src]

\ No newline at end of file diff --git a/packed_simd/type.usizex8.html b/packed_simd/type.usizex8.html new file mode 100644 index 000000000..880e937ea --- /dev/null +++ b/packed_simd/type.usizex8.html @@ -0,0 +1,226 @@ +packed_simd::usizex8 - Rust

[][src]Type Definition packed_simd::usizex8

type usizex8 = Simd<[usize; 8]>;

A vector with 8 usize lanes.

+

Implementations

impl usizex8[src]

pub const fn new(
    x0: usize,
    x1: usize,
    x2: usize,
    x3: usize,
    x4: usize,
    x5: usize,
    x6: usize,
    x7: usize
) -> Self
[src]

Creates a new instance with each vector element initialized with the provided values.

+

pub const fn lanes() -> usize[src]

Returns the number of vector lanes.

+

pub const fn splat(value: usize) -> Self[src]

Constructs a new instance with each element initialized to value.

+
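For example, splat is equivalent to new with the same value in every lane:

use packed_simd::usizex8;

fn main() {
    assert_eq!(usizex8::lanes(), 8);
    // `splat(1)` and spelling out all eight lanes agree.
    assert_eq!(usizex8::splat(1), usizex8::new(1, 1, 1, 1, 1, 1, 1, 1));
}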

pub fn extract(self, index: usize) -> usize[src]

Extracts the value at index.

+

Panics

+

If index >= Self::lanes().

+

pub unsafe fn extract_unchecked(self, index: usize) -> usize[src]

Extracts the value at index.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

#[must_use = "replace does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub fn replace(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Panics

+

If index >= Self::lanes().

+

#[must_use = "replace_unchecked does not modify the original value - it returns a new vector with the value at `index` replaced by `new_value`"] pub unsafe fn replace_unchecked(self, index: usize, new_value: usize) -> Self[src]

Returns a new vector where the value at index is replaced by new_value.

+

Precondition

+

If index >= Self::lanes() the behavior is undefined.

+

impl usizex8[src]

pub fn rotate_left(self, n: usizex8) -> usizex8[src]

Shifts the bits of each lane to the left by the specified amount in the corresponding lane of n, wrapping the truncated bits to the end of the resulting integer.

+

Note: this is neither the same operation as << nor equivalent to slice::rotate_left.

+

pub fn rotate_right(self, n: usizex8) -> usizex8[src]

Shifts the bits of each lane to the right by the specified amount in the corresponding lane of n, wrapping the truncated bits to the beginning of the resulting integer.

+

Note: this is neither the same operation as >> nor equivalent to slice::rotate_right.

+

impl usizex8[src]

pub fn min(self, x: Self) -> Self[src]

Minimum of two vectors.

+

Returns a new vector containing the minimum value of each of the input vector lanes.

+

pub fn max(self, x: Self) -> Self[src]

Maximum of two vectors.

+

Returns a new vector containing the maximum value of each of the input vector lanes.

+

impl usizex8[src]

pub fn wrapping_sum(self) -> usize[src]

Horizontal wrapping sum of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the element type.

+

pub fn wrapping_product(self) -> usize[src]

Horizontal wrapping product of the vector elements.

+

The intrinsic performs a tree-reduction of the vector elements. That is, for an 8-element vector:

+
+

((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))

+
+

If an operation overflows, it returns the mathematical result modulo 2^b, where b is the number of bits of the element type.

+

impl usizex8[src]

pub fn max_element(self) -> usize[src]

Largest vector element value.

+

pub fn min_element(self) -> usize[src]

Smallest vector element value.

+

impl usizex8[src]

pub fn and(self) -> usize[src]

Horizontal bitwise and of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn or(self) -> usize[src]

Horizontal bitwise or of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+

pub fn xor(self) -> usize[src]

Horizontal bitwise xor of the vector elements.

+

Note: if the vector has one lane, the first element of the vector is returned.

+
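For example, the reductions fold all lanes into one usize:

use packed_simd::usizex8;

fn main() {
    let v = usizex8::new(0b111, 0b101, 0b110, 0b100, 0b111, 0b111, 0b111, 0b111);
    // and() keeps only bits set in every lane; or() keeps bits set in any lane.
    assert_eq!(v.and(), 0b100);
    assert_eq!(v.or(), 0b111);
}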

impl usizex8[src]

pub fn from_slice_aligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn from_slice_unaligned(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn from_slice_aligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn from_slice_unaligned_unchecked(slice: &[usize]) -> Self[src]

Instantiates a new vector with the values of the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl usizex8[src]

pub fn write_to_slice_aligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary.

+

pub fn write_to_slice_unaligned(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Panics

+

If slice.len() < Self::lanes().

+

pub unsafe fn write_to_slice_aligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() or &slice[0] is not aligned to an align_of::<Self>() boundary, the behavior is undefined.

+

pub unsafe fn write_to_slice_unaligned_unchecked(self, slice: &mut [usize])[src]

Writes the values of the vector to the slice.

+

Precondition

+

If slice.len() < Self::lanes() the behavior is undefined.

+

impl usizex8[src]

pub fn swap_bytes(self) -> Self[src]

Reverses the byte order of the vector.

+

pub fn to_le(self) -> Self[src]

Converts self to little endian from the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn to_be(self) -> Self[src]

Converts self to big endian from the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

pub fn from_le(x: Self) -> Self[src]

Converts a vector from little endian to the target's endianness.

+

On little endian this is a no-op. On big endian the bytes are swapped.

+

pub fn from_be(x: Self) -> Self[src]

Converts a vector from big endian to the target's endianness.

+

On big endian this is a no-op. On little endian the bytes are swapped.

+

impl usizex8[src]

pub fn count_ones(self) -> Self[src]

Returns the number of ones in the binary representation of the lanes of self.

+

pub fn count_zeros(self) -> Self[src]

Returns the number of zeros in the binary representation of the lanes of self.

+

pub fn leading_zeros(self) -> Self[src]

Returns the number of leading zeros in the binary representation of the lanes of self.

+

pub fn trailing_zeros(self) -> Self[src]

Returns the number of trailing zeros in the binary representation of the lanes of self.

+
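For example, the counts are computed per lane and returned as a vector:

use packed_simd::usizex8;

fn main() {
    let v = usizex8::new(0, 1, 3, 7, 15, 31, 63, 127);
    // Per-lane population count.
    assert_eq!(v.count_ones(), usizex8::new(0, 1, 2, 3, 4, 5, 6, 7));
}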

impl usizex8[src]

pub fn shuffle1_dyn<I>(self, indices: I) -> Self where
    Self: Shuffle1Dyn<Indices = I>, 
[src]

Shuffle vector elements according to indices.

+

impl usizex8[src]

pub fn eq(self, other: Self) -> msizex8[src]

Lane-wise equality comparison.

+

pub fn ne(self, other: Self) -> msizex8[src]

Lane-wise inequality comparison.

+

pub fn lt(self, other: Self) -> msizex8[src]

Lane-wise less-than comparison.

+

pub fn le(self, other: Self) -> msizex8[src]

Lane-wise less-than-or-equals comparison.

+

pub fn gt(self, other: Self) -> msizex8[src]

Lane-wise greater-than comparison.

+

pub fn ge(self, other: Self) -> msizex8[src]

Lane-wise greater-than-or-equals comparison.

+

impl usizex8[src]

pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<usizex8>[src]

Returns a wrapper that implements PartialOrd.

+

impl usizex8[src]

pub fn lex_ord(&self) -> LexicographicallyOrdered<usizex8>[src]

Returns a wrapper that implements Ord.

+

impl usizex8[src]

pub fn bitmask(self) -> u8[src]

Creates a bitmask with the MSB of each vector lane.

+

If the vector has fewer than 8 lanes, the bits that do not correspond to any vector lane are cleared.

+

Trait Implementations

impl Add<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl Add<usize> for usizex8[src]

type Output = Self

The resulting type after applying the + operator.

+

impl AddAssign<Simd<[usize; 8]>> for usizex8[src]

impl AddAssign<usize> for usizex8[src]

impl Binary for usizex8[src]

impl BitAnd<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAnd<usize> for usizex8[src]

type Output = Self

The resulting type after applying the & operator.

+

impl BitAndAssign<Simd<[usize; 8]>> for usizex8[src]

impl BitAndAssign<usize> for usizex8[src]

impl BitOr<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOr<usize> for usizex8[src]

type Output = Self

The resulting type after applying the | operator.

+

impl BitOrAssign<Simd<[usize; 8]>> for usizex8[src]

impl BitOrAssign<usize> for usizex8[src]

impl BitXor<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXor<usize> for usizex8[src]

type Output = Self

The resulting type after applying the ^ operator.

+

impl BitXorAssign<Simd<[usize; 8]>> for usizex8[src]

impl BitXorAssign<usize> for usizex8[src]

impl Debug for usizex8[src]

impl Default for usizex8[src]

impl Div<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl Div<usize> for usizex8[src]

type Output = Self

The resulting type after applying the / operator.

+

impl DivAssign<Simd<[usize; 8]>> for usizex8[src]

impl DivAssign<usize> for usizex8[src]

impl Eq for usizex8[src]

impl From<[usize; 8]> for usizex8[src]

impl FromCast<Simd<[f32; 8]>> for usizex8[src]

impl FromCast<Simd<[f64; 8]>> for usizex8[src]

impl FromCast<Simd<[i16; 8]>> for usizex8[src]

impl FromCast<Simd<[i32; 8]>> for usizex8[src]

impl FromCast<Simd<[i64; 8]>> for usizex8[src]

impl FromCast<Simd<[i8; 8]>> for usizex8[src]

impl FromCast<Simd<[isize; 8]>> for usizex8[src]

impl FromCast<Simd<[m16; 8]>> for usizex8[src]

impl FromCast<Simd<[m32; 8]>> for usizex8[src]

impl FromCast<Simd<[m64; 8]>> for usizex8[src]

impl FromCast<Simd<[m8; 8]>> for usizex8[src]

impl FromCast<Simd<[msize; 8]>> for usizex8[src]

impl FromCast<Simd<[u16; 8]>> for usizex8[src]

impl FromCast<Simd<[u32; 8]>> for usizex8[src]

impl FromCast<Simd<[u64; 8]>> for usizex8[src]

impl FromCast<Simd<[u8; 8]>> for usizex8[src]

impl Hash for usizex8[src]

impl LowerHex for usizex8[src]

impl Mul<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl Mul<usize> for usizex8[src]

type Output = Self

The resulting type after applying the * operator.

+

impl MulAssign<Simd<[usize; 8]>> for usizex8[src]

impl MulAssign<usize> for usizex8[src]

impl Not for usizex8[src]

type Output = Self

The resulting type after applying the ! operator.

+

impl Octal for usizex8[src]

impl PartialEq<Simd<[usize; 8]>> for usizex8[src]

impl<'a> Product<&'a Simd<[usize; 8]>> for usizex8[src]

impl Product<Simd<[usize; 8]>> for usizex8[src]

impl Rem<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl Rem<usize> for usizex8[src]

type Output = Self

The resulting type after applying the % operator.

+

impl RemAssign<Simd<[usize; 8]>> for usizex8[src]

impl RemAssign<usize> for usizex8[src]

impl Shl<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl Shl<u32> for usizex8[src]

type Output = Self

The resulting type after applying the << operator.

+

impl ShlAssign<Simd<[usize; 8]>> for usizex8[src]

impl ShlAssign<u32> for usizex8[src]

impl Shr<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl Shr<u32> for usizex8[src]

type Output = Self

The resulting type after applying the >> operator.

+

impl ShrAssign<Simd<[usize; 8]>> for usizex8[src]

impl ShrAssign<u32> for usizex8[src]

impl Simd for usizex8[src]

type Element = usize

Element type of the SIMD vector

+

type LanesType = [u32; 8]

The type: [u32; Self::N].

+

impl Sub<Simd<[usize; 8]>> for usizex8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl Sub<usize> for usizex8[src]

type Output = Self

The resulting type after applying the - operator.

+

impl SubAssign<Simd<[usize; 8]>> for usizex8[src]

impl SubAssign<usize> for usizex8[src]

impl<'a> Sum<&'a Simd<[usize; 8]>> for usizex8[src]

impl Sum<Simd<[usize; 8]>> for usizex8[src]

impl UpperHex for usizex8[src]

\ No newline at end of file diff --git a/packed_simd/v128/type.f32x4.html b/packed_simd/v128/type.f32x4.html new file mode 100644 index 000000000..fcff403b8 --- /dev/null +++ b/packed_simd/v128/type.f32x4.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/type.f32x4.html...

+ + + \ No newline at end of file diff --git a/packed_simd/v128/type.f64x2.html b/packed_simd/v128/type.f64x2.html new file mode 100644 index 000000000..a3f21f33e --- /dev/null +++ b/packed_simd/v128/type.f64x2.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/type.f64x2.html...

+ + + \ No newline at end of file diff --git a/packed_simd/v128/type.i128x1.html b/packed_simd/v128/type.i128x1.html new file mode 100644 index 000000000..2a0b8cb13 --- /dev/null +++ b/packed_simd/v128/type.i128x1.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/type.i128x1.html...

+ + + \ No newline at end of file diff --git a/packed_simd/v128/type.i16x8.html b/packed_simd/v128/type.i16x8.html new file mode 100644 index 000000000..b853d0ee0 --- /dev/null +++ b/packed_simd/v128/type.i16x8.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/type.i16x8.html...

+ + + \ No newline at end of file diff --git a/packed_simd/v128/type.i32x4.html b/packed_simd/v128/type.i32x4.html new file mode 100644 index 000000000..448205330 --- /dev/null +++ b/packed_simd/v128/type.i32x4.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/type.i32x4.html...

+ + + \ No newline at end of file diff --git a/packed_simd/v128/type.i64x2.html b/packed_simd/v128/type.i64x2.html new file mode 100644 index 000000000..8e018f5c2 --- /dev/null +++ b/packed_simd/v128/type.i64x2.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/type.i64x2.html...

+ + + \ No newline at end of file diff --git a/packed_simd/v128/type.i8x16.html b/packed_simd/v128/type.i8x16.html new file mode 100644 index 000000000..c227729ff --- /dev/null +++ b/packed_simd/v128/type.i8x16.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/type.i8x16.html...

+ + + \ No newline at end of file diff --git a/packed_simd/v128/type.m128x1.html b/packed_simd/v128/type.m128x1.html new file mode 100644 index 000000000..8e430ef8d --- /dev/null +++ b/packed_simd/v128/type.m128x1.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/type.m128x1.html...

+ + + \ No newline at end of file diff --git a/packed_simd/v128/type.m16x8.html b/packed_simd/v128/type.m16x8.html new file mode 100644 index 000000000..38609e661 --- /dev/null +++ b/packed_simd/v128/type.m16x8.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/type.m16x8.html...

+ + + \ No newline at end of file diff --git a/packed_simd/v128/type.m32x4.html b/packed_simd/v128/type.m32x4.html new file mode 100644 index 000000000..5f5ed9ded --- /dev/null +++ b/packed_simd/v128/type.m32x4.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/type.m32x4.html...

+ + + \ No newline at end of file diff --git a/packed_simd/v128/type.m64x2.html b/packed_simd/v128/type.m64x2.html new file mode 100644 index 000000000..ae12b8212 --- /dev/null +++ b/packed_simd/v128/type.m64x2.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/type.m64x2.html...

+ + + \ No newline at end of file diff --git a/packed_simd/v128/type.m8x16.html b/packed_simd/v128/type.m8x16.html new file mode 100644 index 000000000..b9374ec8a --- /dev/null +++ b/packed_simd/v128/type.m8x16.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/type.m8x16.html...

+ + + \ No newline at end of file diff --git a/packed_simd/v128/type.u128x1.html b/packed_simd/v128/type.u128x1.html new file mode 100644 index 000000000..c31b92a58 --- /dev/null +++ b/packed_simd/v128/type.u128x1.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/type.u128x1.html...

+ + + \ No newline at end of file diff --git a/packed_simd/v128/type.u16x8.html b/packed_simd/v128/type.u16x8.html new file mode 100644 index 000000000..0aab06016 --- /dev/null +++ b/packed_simd/v128/type.u16x8.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/type.u16x8.html...

+ + + \ No newline at end of file diff --git a/packed_simd/v128/type.u32x4.html b/packed_simd/v128/type.u32x4.html new file mode 100644 index 000000000..de8f251e2 --- /dev/null +++ b/packed_simd/v128/type.u32x4.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/type.u32x4.html...

+ + + \ No newline at end of file diff --git a/packed_simd/v128/type.u64x2.html b/packed_simd/v128/type.u64x2.html new file mode 100644 index 000000000..1f3006e4f --- /dev/null +++ b/packed_simd/v128/type.u64x2.html @@ -0,0 +1,10 @@ + + + + + + +

Redirecting to ../../packed_simd/type.u64x2.html...

[69 more rustdoc redirect stubs follow: new files, 10 lines each; their HTML was stripped in extraction. Each packed_simd/<module>/type.<name>.html page contains only the text "Redirecting to ../../packed_simd/type.<name>.html..." plus the redirect markup. The new files, by module:]

packed_simd/v128: type.u8x16.html
packed_simd/v16: type.i8x2.html, type.m8x2.html, type.u8x2.html
packed_simd/v256: type.f32x8.html, type.f64x4.html, type.i128x2.html, type.i16x16.html, type.i32x8.html, type.i64x4.html, type.i8x32.html, type.m128x2.html, type.m16x16.html, type.m32x8.html, type.m64x4.html, type.m8x32.html, type.u128x2.html, type.u16x16.html, type.u32x8.html, type.u64x4.html, type.u8x32.html
packed_simd/v32: type.i16x2.html, type.i8x4.html, type.m16x2.html, type.m8x4.html, type.u16x2.html, type.u8x4.html
packed_simd/v512: type.f32x16.html, type.f64x8.html, type.i128x4.html, type.i16x32.html, type.i32x16.html, type.i64x8.html, type.i8x64.html, type.m128x4.html, type.m16x32.html, type.m32x16.html, type.m64x8.html, type.m8x64.html, type.u128x4.html, type.u16x32.html, type.u32x16.html, type.u64x8.html, type.u8x64.html
packed_simd/v64: type.f32x2.html, type.i16x4.html, type.i32x2.html, type.i8x8.html, type.m16x4.html, type.m32x2.html, type.m8x8.html, type.u16x4.html, type.u32x2.html, type.u8x8.html
packed_simd/vPtr: type.cptrx2.html, type.cptrx4.html, type.cptrx8.html, type.mptrx2.html, type.mptrx4.html, type.mptrx8.html
packed_simd/vSize: type.isizex2.html, type.isizex4.html, type.isizex8.html, type.msizex2.html, type.msizex4.html, type.msizex8.html, type.usizex2.html, type.usizex4.html, type.usizex8.html
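Each hunk header above records ten added lines per stub, which is consistent with rustdoc's usual meta-refresh redirect template. A minimal sketch of the first file in the list (packed_simd/v128/type.u8x16.html), reconstructed under that assumption; the exact markup did not survive extraction, so treat the tags and the script line as illustrative rather than verbatim:

<!DOCTYPE html>
<html lang="en">
<head>
    <!-- immediate redirect to the canonical type page -->
    <meta http-equiv="refresh" content="0;URL=../../packed_simd/type.u8x16.html">
</head>
<body>
    <!-- visible fallback link; its text is the only part that survived extraction -->
    <p>Redirecting to <a href="../../packed_simd/type.u8x16.html">../../packed_simd/type.u8x16.html</a>...</p>
    <!-- script redirect, preserving any query string and fragment on the URL -->
    <script>location.replace("../../packed_simd/type.u8x16.html" + location.search + location.hash);</script>
</body>
</html>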
diff --git a/perf-guide/.nojekyll b/perf-guide/.nojekyll
new file mode 100644
index 000000000..863121594
--- /dev/null
+++ b/perf-guide/.nojekyll
@@ -0,0 +1 @@
+This file makes sure that Github Pages doesn't process mdBook's output.
\ No newline at end of file
diff --git a/perf-guide/404.html b/perf-guide/404.html
new file mode 100644
index 000000000..d509e23dc
--- /dev/null
+++ b/perf-guide/404.html
@@ -0,0 +1,216 @@
[mdBook 404 page, 216 lines; the markup was stripped in extraction. Recoverable text: "Document not found (404)" and "This URL is invalid, sorry. Please use the navigation bar or search to continue."]
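Only those two visible strings of the 404 page survive. As a rough sketch, they plausibly sit in the page's main content area like this (the remaining roughly 210 lines are the standard mdBook chrome: theme stylesheets, sidebar, menu bar, and scripts, omitted here; the exact markup is an assumption):

<main>
    <h1>Document not found (404)</h1>
    <p>This URL is invalid, sorry. Please use the navigation bar or search to continue.</p>
</main>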
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/perf-guide/FontAwesome/css/font-awesome.css b/perf-guide/FontAwesome/css/font-awesome.css new file mode 100644 index 000000000..540440ce8 --- /dev/null +++ b/perf-guide/FontAwesome/css/font-awesome.css @@ -0,0 +1,4 @@ +/*! + * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:'FontAwesome';src:url('../fonts/fontawesome-webfont.eot?v=4.7.0');src:url('../fonts/fontawesome-webfont.eot?#iefix&v=4.7.0') format('embedded-opentype'),url('../fonts/fontawesome-webfont.woff2?v=4.7.0') format('woff2'),url('../fonts/fontawesome-webfont.woff?v=4.7.0') format('woff'),url('../fonts/fontawesome-webfont.ttf?v=4.7.0') format('truetype'),url('../fonts/fontawesome-webfont.svg?v=4.7.0#fontawesomeregular') format('svg');font-weight:normal;font-style:normal}.fa{display:inline-block;font:normal normal normal 14px/1 FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571429em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14285714em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left{margin-right:.3em}.fa.fa-pull-right{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scale(-1, 1);-ms-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root 
.fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\f000"}.fa-music:before{content:"\f001"}.fa-search:before{content:"\f002"}.fa-envelope-o:before{content:"\f003"}.fa-heart:before{content:"\f004"}.fa-star:before{content:"\f005"}.fa-star-o:before{content:"\f006"}.fa-user:before{content:"\f007"}.fa-film:before{content:"\f008"}.fa-th-large:before{content:"\f009"}.fa-th:before{content:"\f00a"}.fa-th-list:before{content:"\f00b"}.fa-check:before{content:"\f00c"}.fa-remove:before,.fa-close:before,.fa-times:before{content:"\f00d"}.fa-search-plus:before{content:"\f00e"}.fa-search-minus:before{content:"\f010"}.fa-power-off:before{content:"\f011"}.fa-signal:before{content:"\f012"}.fa-gear:before,.fa-cog:before{content:"\f013"}.fa-trash-o:before{content:"\f014"}.fa-home:before{content:"\f015"}.fa-file-o:before{content:"\f016"}.fa-clock-o:before{content:"\f017"}.fa-road:before{content:"\f018"}.fa-download:before{content:"\f019"}.fa-arrow-circle-o-down:before{content:"\f01a"}.fa-arrow-circle-o-up:before{content:"\f01b"}.fa-inbox:before{content:"\f01c"}.fa-play-circle-o:before{content:"\f01d"}.fa-rotate-right:before,.fa-repeat:before{content:"\f01e"}.fa-refresh:before{content:"\f021"}.fa-list-alt:before{content:"\f022"}.fa-lock:before{content:"\f023"}.fa-flag:before{content:"\f024"}.fa-headphones:before{content:"\f025"}.fa-volume-off:before{content:"\f026"}.fa-volume-down:before{content:"\f027"}.fa-volume-up:before{content:"\f028"}.fa-qrcode:before{content:"\f029"}.fa-barcode:before{content:"\f02a"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-book:before{content:"\f02d"}.fa-bookmark:before{content:"\f02e"}.fa-print:before{content:"\f02f"}.fa-camera:before{content:"\f030"}.fa-font:before{content:"\f031"}.fa-bold:before{content:"\f032"}.fa-italic:before{content:"\f033"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-align-left:before{content:"\f036"}.fa-align-center:before{content:"\f037"}.fa-align-right:before{content:"\f038"}.fa-align-justify:before{content:"\f039"}.fa-list:before{content:"\f03a"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-indent:before{content:"\f03c"}.fa-video-camera:before{content:"\f03d"}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:"\f03e"}.fa-pencil:before{content:"\f040"}.fa-map-marker:before{content:"\f041"}.fa-adjust:before{content:"\f042"}.fa-tint:before{content:"\f043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\f044"}.fa-share-square-o:before{content:"\f045"}.fa-check-square-o:before{content:"\f046"}.fa-arrows:before{content:"\f047"}.fa-step-backward:before{content:"\f048"}.fa-fast-backward:before{content:"\f049"}.fa-backward:before{content:"\f04a"}.fa-play:before{content:"\f04b"}.fa-pause:before{content:"\f04c"}.fa-stop:before{content:"\f04d"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before{content:"\f050"}.fa-step-forward:before{content:"\f051"}.fa-eject:before{content:"\f052"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-plus-circle:before{content:"\f055"}.fa-minus-circle:before{content:"\f056"}.fa-times-circle:before{content:"\f057"}.fa-check-circle:before{content:"\f058"}.fa-question-circle:before{content:"\f059"}.fa-info-circle:before{content:"\f05a"}.fa-cross
hairs:before{content:"\f05b"}.fa-times-circle-o:before{content:"\f05c"}.fa-check-circle-o:before{content:"\f05d"}.fa-ban:before{content:"\f05e"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-down:before{content:"\f063"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-expand:before{content:"\f065"}.fa-compress:before{content:"\f066"}.fa-plus:before{content:"\f067"}.fa-minus:before{content:"\f068"}.fa-asterisk:before{content:"\f069"}.fa-exclamation-circle:before{content:"\f06a"}.fa-gift:before{content:"\f06b"}.fa-leaf:before{content:"\f06c"}.fa-fire:before{content:"\f06d"}.fa-eye:before{content:"\f06e"}.fa-eye-slash:before{content:"\f070"}.fa-warning:before,.fa-exclamation-triangle:before{content:"\f071"}.fa-plane:before{content:"\f072"}.fa-calendar:before{content:"\f073"}.fa-random:before{content:"\f074"}.fa-comment:before{content:"\f075"}.fa-magnet:before{content:"\f076"}.fa-chevron-up:before{content:"\f077"}.fa-chevron-down:before{content:"\f078"}.fa-retweet:before{content:"\f079"}.fa-shopping-cart:before{content:"\f07a"}.fa-folder:before{content:"\f07b"}.fa-folder-open:before{content:"\f07c"}.fa-arrows-v:before{content:"\f07d"}.fa-arrows-h:before{content:"\f07e"}.fa-bar-chart-o:before,.fa-bar-chart:before{content:"\f080"}.fa-twitter-square:before{content:"\f081"}.fa-facebook-square:before{content:"\f082"}.fa-camera-retro:before{content:"\f083"}.fa-key:before{content:"\f084"}.fa-gears:before,.fa-cogs:before{content:"\f085"}.fa-comments:before{content:"\f086"}.fa-thumbs-o-up:before{content:"\f087"}.fa-thumbs-o-down:before{content:"\f088"}.fa-star-half:before{content:"\f089"}.fa-heart-o:before{content:"\f08a"}.fa-sign-out:before{content:"\f08b"}.fa-linkedin-square:before{content:"\f08c"}.fa-thumb-tack:before{content:"\f08d"}.fa-external-link:before{content:"\f08e"}.fa-sign-in:before{content:"\f090"}.fa-trophy:before{content:"\f091"}.fa-github-square:before{content:"\f092"}.fa-upload:before{content:"\f093"}.fa-lemon-o:before{content:"\f094"}.fa-phone:before{content:"\f095"}.fa-square-o:before{content:"\f096"}.fa-bookmark-o:before{content:"\f097"}.fa-phone-square:before{content:"\f098"}.fa-twitter:before{content:"\f099"}.fa-facebook-f:before,.fa-facebook:before{content:"\f09a"}.fa-github:before{content:"\f09b"}.fa-unlock:before{content:"\f09c"}.fa-credit-card:before{content:"\f09d"}.fa-feed:before,.fa-rss:before{content:"\f09e"}.fa-hdd-o:before{content:"\f0a0"}.fa-bullhorn:before{content:"\f0a1"}.fa-bell:before{content:"\f0f3"}.fa-certificate:before{content:"\f0a3"}.fa-hand-o-right:before{content:"\f0a4"}.fa-hand-o-left:before{content:"\f0a5"}.fa-hand-o-up:before{content:"\f0a6"}.fa-hand-o-down:before{content:"\f0a7"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-globe:before{content:"\f0ac"}.fa-wrench:before{content:"\f0ad"}.fa-tasks:before{content:"\f0ae"}.fa-filter:before{content:"\f0b0"}.fa-briefcase:before{content:"\f0b1"}.fa-arrows-alt:before{content:"\f0b2"}.fa-group:before,.fa-users:before{content:"\f0c0"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-cloud:before{content:"\f0c2"}.fa-flask:before{content:"\f0c3"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-copy:before,.fa-files-o:before{content:"\f0c5"}.fa-paperclip:before{content:"\f0c6"}.fa-save:before,.fa-floppy-o:before{content:"\f0c7"}.fa-square:before{content:"\f0c8"}.fa-navicon:before,.fa-reor
der:before,.fa-bars:before{content:"\f0c9"}.fa-list-ul:before{content:"\f0ca"}.fa-list-ol:before{content:"\f0cb"}.fa-strikethrough:before{content:"\f0cc"}.fa-underline:before{content:"\f0cd"}.fa-table:before{content:"\f0ce"}.fa-magic:before{content:"\f0d0"}.fa-truck:before{content:"\f0d1"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-square:before{content:"\f0d3"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-plus:before{content:"\f0d5"}.fa-money:before{content:"\f0d6"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-up:before{content:"\f0d8"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-columns:before{content:"\f0db"}.fa-unsorted:before,.fa-sort:before{content:"\f0dc"}.fa-sort-down:before,.fa-sort-desc:before{content:"\f0dd"}.fa-sort-up:before,.fa-sort-asc:before{content:"\f0de"}.fa-envelope:before{content:"\f0e0"}.fa-linkedin:before{content:"\f0e1"}.fa-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-legal:before,.fa-gavel:before{content:"\f0e3"}.fa-dashboard:before,.fa-tachometer:before{content:"\f0e4"}.fa-comment-o:before{content:"\f0e5"}.fa-comments-o:before{content:"\f0e6"}.fa-flash:before,.fa-bolt:before{content:"\f0e7"}.fa-sitemap:before{content:"\f0e8"}.fa-umbrella:before{content:"\f0e9"}.fa-paste:before,.fa-clipboard:before{content:"\f0ea"}.fa-lightbulb-o:before{content:"\f0eb"}.fa-exchange:before{content:"\f0ec"}.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-upload:before{content:"\f0ee"}.fa-user-md:before{content:"\f0f0"}.fa-stethoscope:before{content:"\f0f1"}.fa-suitcase:before{content:"\f0f2"}.fa-bell-o:before{content:"\f0a2"}.fa-coffee:before{content:"\f0f4"}.fa-cutlery:before{content:"\f0f5"}.fa-file-text-o:before{content:"\f0f6"}.fa-building-o:before{content:"\f0f7"}.fa-hospital-o:before{content:"\f0f8"}.fa-ambulance:before{content:"\f0f9"}.fa-medkit:before{content:"\f0fa"}.fa-fighter-jet:before{content:"\f0fb"}.fa-beer:before{content:"\f0fc"}.fa-h-square:before{content:"\f0fd"}.fa-plus-square:before{content:"\f0fe"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-down:before{content:"\f107"}.fa-desktop:before{content:"\f108"}.fa-laptop:before{content:"\f109"}.fa-tablet:before{content:"\f10a"}.fa-mobile-phone:before,.fa-mobile:before{content:"\f10b"}.fa-circle-o:before{content:"\f10c"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-spinner:before{content:"\f110"}.fa-circle:before{content:"\f111"}.fa-mail-reply:before,.fa-reply:before{content:"\f112"}.fa-github-alt:before{content:"\f113"}.fa-folder-o:before{content:"\f114"}.fa-folder-open-o:before{content:"\f115"}.fa-smile-o:before{content:"\f118"}.fa-frown-o:before{content:"\f119"}.fa-meh-o:before{content:"\f11a"}.fa-gamepad:before{content:"\f11b"}.fa-keyboard-o:before{content:"\f11c"}.fa-flag-o:before{content:"\f11d"}.fa-flag-checkered:before{content:"\f11e"}.fa-terminal:before{content:"\f120"}.fa-code:before{content:"\f121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\f123"}.fa-location-arrow:before{content:"\f124"}.fa-crop:before{content:"\f125"}.fa-code-fork:before{content:"\f126"}.fa-unlink:before,.fa-chain-broken:before{content:"\f127"}.fa-question:before{content:"\f128"}.fa-i
nfo:before{content:"\f129"}.fa-exclamation:before{content:"\f12a"}.fa-superscript:before{content:"\f12b"}.fa-subscript:before{content:"\f12c"}.fa-eraser:before{content:"\f12d"}.fa-puzzle-piece:before{content:"\f12e"}.fa-microphone:before{content:"\f130"}.fa-microphone-slash:before{content:"\f131"}.fa-shield:before{content:"\f132"}.fa-calendar-o:before{content:"\f133"}.fa-fire-extinguisher:before{content:"\f134"}.fa-rocket:before{content:"\f135"}.fa-maxcdn:before{content:"\f136"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-html5:before{content:"\f13b"}.fa-css3:before{content:"\f13c"}.fa-anchor:before{content:"\f13d"}.fa-unlock-alt:before{content:"\f13e"}.fa-bullseye:before{content:"\f140"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-rss-square:before{content:"\f143"}.fa-play-circle:before{content:"\f144"}.fa-ticket:before{content:"\f145"}.fa-minus-square:before{content:"\f146"}.fa-minus-square-o:before{content:"\f147"}.fa-level-up:before{content:"\f148"}.fa-level-down:before{content:"\f149"}.fa-check-square:before{content:"\f14a"}.fa-pencil-square:before{content:"\f14b"}.fa-external-link-square:before{content:"\f14c"}.fa-share-square:before{content:"\f14d"}.fa-compass:before{content:"\f14e"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:"\f150"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:"\f151"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:"\f152"}.fa-euro:before,.fa-eur:before{content:"\f153"}.fa-gbp:before{content:"\f154"}.fa-dollar:before,.fa-usd:before{content:"\f155"}.fa-rupee:before,.fa-inr:before{content:"\f156"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:"\f157"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:"\f158"}.fa-won:before,.fa-krw:before{content:"\f159"}.fa-bitcoin:before,.fa-btc:before{content:"\f15a"}.fa-file:before{content:"\f15b"}.fa-file-text:before{content:"\f15c"}.fa-sort-alpha-asc:before{content:"\f15d"}.fa-sort-alpha-desc:before{content:"\f15e"}.fa-sort-amount-asc:before{content:"\f160"}.fa-sort-amount-desc:before{content:"\f161"}.fa-sort-numeric-asc:before{content:"\f162"}.fa-sort-numeric-desc:before{content:"\f163"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbs-down:before{content:"\f165"}.fa-youtube-square:before{content:"\f166"}.fa-youtube:before{content:"\f167"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-youtube-play:before{content:"\f16a"}.fa-dropbox:before{content:"\f16b"}.fa-stack-overflow:before{content:"\f16c"}.fa-instagram:before{content:"\f16d"}.fa-flickr:before{content:"\f16e"}.fa-adn:before{content:"\f170"}.fa-bitbucket:before{content:"\f171"}.fa-bitbucket-square:before{content:"\f172"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-long-arrow-down:before{content:"\f175"}.fa-long-arrow-up:before{content:"\f176"}.fa-long-arrow-left:before{content:"\f177"}.fa-long-arrow-right:before{content:"\f178"}.fa-apple:before{content:"\f179"}.fa-windows:before{content:"\f17a"}.fa-android:before{content:"\f17b"}.fa-linux:before{content:"\f17c"}.fa-dribbble:before{content:"\f17d"}.fa-skype:before{content:"\f17e"}.fa-foursquare:before{content:"\f180"}.fa-trello:before{content:"\f181"}.fa-female:before{content:"\f182"}.fa-male:before{content:"\f183"}.fa-gittip:before,.fa-gratipay:before{content:"\f184"}.fa-sun-o:before{content:"\f185"}.fa-moon-o:bef
ore{content:"\f186"}.fa-archive:before{content:"\f187"}.fa-bug:before{content:"\f188"}.fa-vk:before{content:"\f189"}.fa-weibo:before{content:"\f18a"}.fa-renren:before{content:"\f18b"}.fa-pagelines:before{content:"\f18c"}.fa-stack-exchange:before{content:"\f18d"}.fa-arrow-circle-o-right:before{content:"\f18e"}.fa-arrow-circle-o-left:before{content:"\f190"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:"\f191"}.fa-dot-circle-o:before{content:"\f192"}.fa-wheelchair:before{content:"\f193"}.fa-vimeo-square:before{content:"\f194"}.fa-turkish-lira:before,.fa-try:before{content:"\f195"}.fa-plus-square-o:before{content:"\f196"}.fa-space-shuttle:before{content:"\f197"}.fa-slack:before{content:"\f198"}.fa-envelope-square:before{content:"\f199"}.fa-wordpress:before{content:"\f19a"}.fa-openid:before{content:"\f19b"}.fa-institution:before,.fa-bank:before,.fa-university:before{content:"\f19c"}.fa-mortar-board:before,.fa-graduation-cap:before{content:"\f19d"}.fa-yahoo:before{content:"\f19e"}.fa-google:before{content:"\f1a0"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-square:before{content:"\f1a2"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-stumbleupon:before{content:"\f1a4"}.fa-delicious:before{content:"\f1a5"}.fa-digg:before{content:"\f1a6"}.fa-pied-piper-pp:before{content:"\f1a7"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-drupal:before{content:"\f1a9"}.fa-joomla:before{content:"\f1aa"}.fa-language:before{content:"\f1ab"}.fa-fax:before{content:"\f1ac"}.fa-building:before{content:"\f1ad"}.fa-child:before{content:"\f1ae"}.fa-paw:before{content:"\f1b0"}.fa-spoon:before{content:"\f1b1"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-recycle:before{content:"\f1b8"}.fa-automobile:before,.fa-car:before{content:"\f1b9"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-tree:before{content:"\f1bb"}.fa-spotify:before{content:"\f1bc"}.fa-deviantart:before{content:"\f1bd"}.fa-soundcloud:before{content:"\f1be"}.fa-database:before{content:"\f1c0"}.fa-file-pdf-o:before{content:"\f1c1"}.fa-file-word-o:before{content:"\f1c2"}.fa-file-excel-o:before{content:"\f1c3"}.fa-file-powerpoint-o:before{content:"\f1c4"}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:"\f1c5"}.fa-file-zip-o:before,.fa-file-archive-o:before{content:"\f1c6"}.fa-file-sound-o:before,.fa-file-audio-o:before{content:"\f1c7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\f1c8"}.fa-file-code-o:before{content:"\f1c9"}.fa-vine:before{content:"\f1ca"}.fa-codepen:before{content:"\f1cb"}.fa-jsfiddle:before{content:"\f1cc"}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:"\f1cd"}.fa-circle-o-notch:before{content:"\f1ce"}.fa-ra:before,.fa-resistance:before,.fa-rebel:before{content:"\f1d0"}.fa-ge:before,.fa-empire:before{content:"\f1d1"}.fa-git-square:before{content:"\f1d2"}.fa-git:before{content:"\f1d3"}.fa-y-combinator-square:before,.fa-yc-square:before,.fa-hacker-news:before{content:"\f1d4"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-qq:before{content:"\f1d6"}.fa-wechat:before,.fa-weixin:before{content:"\f1d7"}.fa-send:before,.fa-paper-plane:before{content:"\f1d8"}.fa-send-o:before,.fa-paper-plane-o:before{content:"\f1d9"}.fa-history:before{content:"\f1da"}.fa-circle-thin:before{content:"\f1db"}.fa-header:before{content:"\f1dc"}.fa-paragraph:before{content:"\f1dd"}
.fa-sliders:before{content:"\f1de"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-bomb:before{content:"\f1e2"}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:"\f1e3"}.fa-tty:before{content:"\f1e4"}.fa-binoculars:before{content:"\f1e5"}.fa-plug:before{content:"\f1e6"}.fa-slideshare:before{content:"\f1e7"}.fa-twitch:before{content:"\f1e8"}.fa-yelp:before{content:"\f1e9"}.fa-newspaper-o:before{content:"\f1ea"}.fa-wifi:before{content:"\f1eb"}.fa-calculator:before{content:"\f1ec"}.fa-paypal:before{content:"\f1ed"}.fa-google-wallet:before{content:"\f1ee"}.fa-cc-visa:before{content:"\f1f0"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-bell-slash:before{content:"\f1f6"}.fa-bell-slash-o:before{content:"\f1f7"}.fa-trash:before{content:"\f1f8"}.fa-copyright:before{content:"\f1f9"}.fa-at:before{content:"\f1fa"}.fa-eyedropper:before{content:"\f1fb"}.fa-paint-brush:before{content:"\f1fc"}.fa-birthday-cake:before{content:"\f1fd"}.fa-area-chart:before{content:"\f1fe"}.fa-pie-chart:before{content:"\f200"}.fa-line-chart:before{content:"\f201"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-bicycle:before{content:"\f206"}.fa-bus:before{content:"\f207"}.fa-ioxhost:before{content:"\f208"}.fa-angellist:before{content:"\f209"}.fa-cc:before{content:"\f20a"}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:"\f20b"}.fa-meanpath:before{content:"\f20c"}.fa-buysellads:before{content:"\f20d"}.fa-connectdevelop:before{content:"\f20e"}.fa-dashcube:before{content:"\f210"}.fa-forumbee:before{content:"\f211"}.fa-leanpub:before{content:"\f212"}.fa-sellsy:before{content:"\f213"}.fa-shirtsinbulk:before{content:"\f214"}.fa-simplybuilt:before{content:"\f215"}.fa-skyatlas:before{content:"\f216"}.fa-cart-plus:before{content:"\f217"}.fa-cart-arrow-down:before{content:"\f218"}.fa-diamond:before{content:"\f219"}.fa-ship:before{content:"\f21a"}.fa-user-secret:before{content:"\f21b"}.fa-motorcycle:before{content:"\f21c"}.fa-street-view:before{content:"\f21d"}.fa-heartbeat:before{content:"\f21e"}.fa-venus:before{content:"\f221"}.fa-mars:before{content:"\f222"}.fa-mercury:before{content:"\f223"}.fa-intersex:before,.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-venus-double:before{content:"\f226"}.fa-mars-double:before{content:"\f227"}.fa-venus-mars:before{content:"\f228"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-neuter:before{content:"\f22c"}.fa-genderless:before{content:"\f22d"}.fa-facebook-official:before{content:"\f230"}.fa-pinterest-p:before{content:"\f231"}.fa-whatsapp:before{content:"\f232"}.fa-server:before{content:"\f233"}.fa-user-plus:before{content:"\f234"}.fa-user-times:before{content:"\f235"}.fa-hotel:before,.fa-bed:before{content:"\f236"}.fa-viacoin:before{content:"\f237"}.fa-train:before{content:"\f238"}.fa-subway:before{content:"\f239"}.fa-medium:before{content:"\f23a"}.fa-yc:before,.fa-y-combinator:before{content:"\f23b"}.fa-optin-monster:before{content:"\f23c"}.fa-opencart:before{content:"\f23d"}.fa-expeditedssl:before{content:"\f23e"}.fa-battery-4:before,.fa-battery:before,.fa-battery-full:before{content:"\f240"}.fa-battery-3:before,.fa-battery-three-quarters:before{content:"\f241"}.fa-battery-2:before
,.fa-battery-half:before{content:"\f242"}.fa-battery-1:before,.fa-battery-quarter:before{content:"\f243"}.fa-battery-0:before,.fa-battery-empty:before{content:"\f244"}.fa-mouse-pointer:before{content:"\f245"}.fa-i-cursor:before{content:"\f246"}.fa-object-group:before{content:"\f247"}.fa-object-ungroup:before{content:"\f248"}.fa-sticky-note:before{content:"\f249"}.fa-sticky-note-o:before{content:"\f24a"}.fa-cc-jcb:before{content:"\f24b"}.fa-cc-diners-club:before{content:"\f24c"}.fa-clone:before{content:"\f24d"}.fa-balance-scale:before{content:"\f24e"}.fa-hourglass-o:before{content:"\f250"}.fa-hourglass-1:before,.fa-hourglass-start:before{content:"\f251"}.fa-hourglass-2:before,.fa-hourglass-half:before{content:"\f252"}.fa-hourglass-3:before,.fa-hourglass-end:before{content:"\f253"}.fa-hourglass:before{content:"\f254"}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:"\f255"}.fa-hand-stop-o:before,.fa-hand-paper-o:before{content:"\f256"}.fa-hand-scissors-o:before{content:"\f257"}.fa-hand-lizard-o:before{content:"\f258"}.fa-hand-spock-o:before{content:"\f259"}.fa-hand-pointer-o:before{content:"\f25a"}.fa-hand-peace-o:before{content:"\f25b"}.fa-trademark:before{content:"\f25c"}.fa-registered:before{content:"\f25d"}.fa-creative-commons:before{content:"\f25e"}.fa-gg:before{content:"\f260"}.fa-gg-circle:before{content:"\f261"}.fa-tripadvisor:before{content:"\f262"}.fa-odnoklassniki:before{content:"\f263"}.fa-odnoklassniki-square:before{content:"\f264"}.fa-get-pocket:before{content:"\f265"}.fa-wikipedia-w:before{content:"\f266"}.fa-safari:before{content:"\f267"}.fa-chrome:before{content:"\f268"}.fa-firefox:before{content:"\f269"}.fa-opera:before{content:"\f26a"}.fa-internet-explorer:before{content:"\f26b"}.fa-tv:before,.fa-television:before{content:"\f26c"}.fa-contao:before{content:"\f26d"}.fa-500px:before{content:"\f26e"}.fa-amazon:before{content:"\f270"}.fa-calendar-plus-o:before{content:"\f271"}.fa-calendar-minus-o:before{content:"\f272"}.fa-calendar-times-o:before{content:"\f273"}.fa-calendar-check-o:before{content:"\f274"}.fa-industry:before{content:"\f275"}.fa-map-pin:before{content:"\f276"}.fa-map-signs:before{content:"\f277"}.fa-map-o:before{content:"\f278"}.fa-map:before{content:"\f279"}.fa-commenting:before{content:"\f27a"}.fa-commenting-o:before{content:"\f27b"}.fa-houzz:before{content:"\f27c"}.fa-vimeo:before{content:"\f27d"}.fa-black-tie:before{content:"\f27e"}.fa-fonticons:before{content:"\f280"}.fa-reddit-alien:before{content:"\f281"}.fa-edge:before{content:"\f282"}.fa-credit-card-alt:before{content:"\f283"}.fa-codiepie:before{content:"\f284"}.fa-modx:before{content:"\f285"}.fa-fort-awesome:before{content:"\f286"}.fa-usb:before{content:"\f287"}.fa-product-hunt:before{content:"\f288"}.fa-mixcloud:before{content:"\f289"}.fa-scribd:before{content:"\f28a"}.fa-pause-circle:before{content:"\f28b"}.fa-pause-circle-o:before{content:"\f28c"}.fa-stop-circle:before{content:"\f28d"}.fa-stop-circle-o:before{content:"\f28e"}.fa-shopping-bag:before{content:"\f290"}.fa-shopping-basket:before{content:"\f291"}.fa-hashtag:before{content:"\f292"}.fa-bluetooth:before{content:"\f293"}.fa-bluetooth-b:before{content:"\f294"}.fa-percent:before{content:"\f295"}.fa-gitlab:before{content:"\f296"}.fa-wpbeginner:before{content:"\f297"}.fa-wpforms:before{content:"\f298"}.fa-envira:before{content:"\f299"}.fa-universal-access:before{content:"\f29a"}.fa-wheelchair-alt:before{content:"\f29b"}.fa-question-circle-o:before{content:"\f29c"}.fa-blind:before{content:"\f29d"}.fa-audio-description:before{content:"\f29e"}.f
a-volume-control-phone:before{content:"\f2a0"}.fa-braille:before{content:"\f2a1"}.fa-assistive-listening-systems:before{content:"\f2a2"}.fa-asl-interpreting:before,.fa-american-sign-language-interpreting:before{content:"\f2a3"}.fa-deafness:before,.fa-hard-of-hearing:before,.fa-deaf:before{content:"\f2a4"}.fa-glide:before{content:"\f2a5"}.fa-glide-g:before{content:"\f2a6"}.fa-signing:before,.fa-sign-language:before{content:"\f2a7"}.fa-low-vision:before{content:"\f2a8"}.fa-viadeo:before{content:"\f2a9"}.fa-viadeo-square:before{content:"\f2aa"}.fa-snapchat:before{content:"\f2ab"}.fa-snapchat-ghost:before{content:"\f2ac"}.fa-snapchat-square:before{content:"\f2ad"}.fa-pied-piper:before{content:"\f2ae"}.fa-first-order:before{content:"\f2b0"}.fa-yoast:before{content:"\f2b1"}.fa-themeisle:before{content:"\f2b2"}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:"\f2b3"}.fa-fa:before,.fa-font-awesome:before{content:"\f2b4"}.fa-handshake-o:before{content:"\f2b5"}.fa-envelope-open:before{content:"\f2b6"}.fa-envelope-open-o:before{content:"\f2b7"}.fa-linode:before{content:"\f2b8"}.fa-address-book:before{content:"\f2b9"}.fa-address-book-o:before{content:"\f2ba"}.fa-vcard:before,.fa-address-card:before{content:"\f2bb"}.fa-vcard-o:before,.fa-address-card-o:before{content:"\f2bc"}.fa-user-circle:before{content:"\f2bd"}.fa-user-circle-o:before{content:"\f2be"}.fa-user-o:before{content:"\f2c0"}.fa-id-badge:before{content:"\f2c1"}.fa-drivers-license:before,.fa-id-card:before{content:"\f2c2"}.fa-drivers-license-o:before,.fa-id-card-o:before{content:"\f2c3"}.fa-quora:before{content:"\f2c4"}.fa-free-code-camp:before{content:"\f2c5"}.fa-telegram:before{content:"\f2c6"}.fa-thermometer-4:before,.fa-thermometer:before,.fa-thermometer-full:before{content:"\f2c7"}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-thermometer-2:before,.fa-thermometer-half:before{content:"\f2c9"}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:"\f2ca"}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:"\f2cb"}.fa-shower:before{content:"\f2cc"}.fa-bathtub:before,.fa-s15:before,.fa-bath:before{content:"\f2cd"}.fa-podcast:before{content:"\f2ce"}.fa-window-maximize:before{content:"\f2d0"}.fa-window-minimize:before{content:"\f2d1"}.fa-window-restore:before{content:"\f2d2"}.fa-times-rectangle:before,.fa-window-close:before{content:"\f2d3"}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:"\f2d4"}.fa-bandcamp:before{content:"\f2d5"}.fa-grav:before{content:"\f2d6"}.fa-etsy:before{content:"\f2d7"}.fa-imdb:before{content:"\f2d8"}.fa-ravelry:before{content:"\f2d9"}.fa-eercast:before{content:"\f2da"}.fa-microchip:before{content:"\f2db"}.fa-snowflake-o:before{content:"\f2dc"}.fa-superpowers:before{content:"\f2dd"}.fa-wpexplorer:before{content:"\f2de"}.fa-meetup:before{content:"\f2e0"}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0, 0, 0, 0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto} diff --git a/perf-guide/FontAwesome/fonts/FontAwesome.ttf b/perf-guide/FontAwesome/fonts/FontAwesome.ttf new file mode 100644 index 000000000..35acda2fa Binary files /dev/null and b/perf-guide/FontAwesome/fonts/FontAwesome.ttf differ diff --git a/perf-guide/FontAwesome/fonts/fontawesome-webfont.eot b/perf-guide/FontAwesome/fonts/fontawesome-webfont.eot new file mode 100644 index 000000000..e9f60ca95 Binary files /dev/null and 
b/perf-guide/FontAwesome/fonts/fontawesome-webfont.eot differ
diff --git a/perf-guide/FontAwesome/fonts/fontawesome-webfont.svg b/perf-guide/FontAwesome/fonts/fontawesome-webfont.svg
new file mode 100644
index 000000000..855c845e5
--- /dev/null
+++ b/perf-guide/FontAwesome/fonts/fontawesome-webfont.svg
@@ -0,0 +1,2671 @@
[SVG webfont, 2671 lines; the glyph markup was stripped in extraction. Recoverable header text: "Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016", "By ,,,", "Copyright Dave Gandy 2016. All rights reserved."]
diff --git a/perf-guide/FontAwesome/fonts/fontawesome-webfont.ttf b/perf-guide/FontAwesome/fonts/fontawesome-webfont.ttf
new file mode 100644
index 000000000..35acda2fa
Binary files /dev/null and b/perf-guide/FontAwesome/fonts/fontawesome-webfont.ttf differ
diff --git a/perf-guide/FontAwesome/fonts/fontawesome-webfont.woff b/perf-guide/FontAwesome/fonts/fontawesome-webfont.woff
new file mode 100644
index 000000000..400014a4b
Binary files /dev/null and b/perf-guide/FontAwesome/fonts/fontawesome-webfont.woff differ
diff --git a/perf-guide/FontAwesome/fonts/fontawesome-webfont.woff2 b/perf-guide/FontAwesome/fonts/fontawesome-webfont.woff2
new file mode 100644
index 000000000..4d13fc604
Binary files /dev/null and b/perf-guide/FontAwesome/fonts/fontawesome-webfont.woff2 differ
diff --git a/perf-guide/ascii.css b/perf-guide/ascii.css
new file mode 100644
index 000000000..4c0265119
--- /dev/null
+++ b/perf-guide/ascii.css
@@ -0,0 +1,4 @@
+code {
+    /* "Source Code Pro" breaks ASCII art */
+    font-family: Consolas, "Ubuntu Mono", Menlo, "DejaVu Sans Mono", monospace;
+}
diff --git a/perf-guide/ayu-highlight.css b/perf-guide/ayu-highlight.css
new file mode 100644
index 000000000..0c45c6f14
--- /dev/null
+++ b/perf-guide/ayu-highlight.css
@@ -0,0 +1,79 @@
+/*
+Based off of the Ayu theme
+Original by Dempfi (https://github.com/dempfi/ayu)
+*/
+
+.hljs {
+    display: block;
+    overflow-x: auto;
+    background: #191f26;
+    color: #e6e1cf;
+    padding: 0.5em;
+}
+
+.hljs-comment,
+.hljs-quote {
+    color: #5c6773;
+    font-style: italic;
+}
+
+.hljs-variable,
+.hljs-template-variable,
+.hljs-attribute,
+.hljs-attr,
+.hljs-regexp,
+.hljs-link,
+.hljs-selector-id,
+.hljs-selector-class { + color: #ff7733; +} + +.hljs-number, +.hljs-meta, +.hljs-builtin-name, +.hljs-literal, +.hljs-type, +.hljs-params { + color: #ffee99; +} + +.hljs-string, +.hljs-bullet { + color: #b8cc52; +} + +.hljs-title, +.hljs-built_in, +.hljs-section { + color: #ffb454; +} + +.hljs-keyword, +.hljs-selector-tag, +.hljs-symbol { + color: #ff7733; +} + +.hljs-name { + color: #36a3d9; +} + +.hljs-tag { + color: #00568d; +} + +.hljs-emphasis { + font-style: italic; +} + +.hljs-strong { + font-weight: bold; +} + +.hljs-addition { + color: #91b362; +} + +.hljs-deletion { + color: #d96c75; +} diff --git a/perf-guide/book.js b/perf-guide/book.js new file mode 100644 index 000000000..049505fc6 --- /dev/null +++ b/perf-guide/book.js @@ -0,0 +1,660 @@ +"use strict"; + +// Fix back button cache problem +window.onunload = function () { }; + +// Global variable, shared between modules +function playground_text(playground) { + let code_block = playground.querySelector("code"); + + if (window.ace && code_block.classList.contains("editable")) { + let editor = window.ace.edit(code_block); + return editor.getValue(); + } else { + return code_block.textContent; + } +} + +(function codeSnippets() { + function fetch_with_timeout(url, options, timeout = 6000) { + return Promise.race([ + fetch(url, options), + new Promise((_, reject) => setTimeout(() => reject(new Error('timeout')), timeout)) + ]); + } + + var playgrounds = Array.from(document.querySelectorAll(".playground")); + if (playgrounds.length > 0) { + fetch_with_timeout("https://play.rust-lang.org/meta/crates", { + headers: { + 'Content-Type': "application/json", + }, + method: 'POST', + mode: 'cors', + }) + .then(response => response.json()) + .then(response => { + // get list of crates available in the rust playground + let playground_crates = response.crates.map(item => item["id"]); + playgrounds.forEach(block => handle_crate_list_update(block, playground_crates)); + }); + } + + function handle_crate_list_update(playground_block, playground_crates) { + // update the play buttons after receiving the response + update_play_button(playground_block, playground_crates); + + // and install on change listener to dynamically update ACE editors + if (window.ace) { + let code_block = playground_block.querySelector("code"); + if (code_block.classList.contains("editable")) { + let editor = window.ace.edit(code_block); + editor.addEventListener("change", function (e) { + update_play_button(playground_block, playground_crates); + }); + // add Ctrl-Enter command to execute rust code + editor.commands.addCommand({ + name: "run", + bindKey: { + win: "Ctrl-Enter", + mac: "Ctrl-Enter" + }, + exec: _editor => run_rust_code(playground_block) + }); + } + } + } + + // updates the visibility of play button based on `no_run` class and + // used crates vs ones available on http://play.rust-lang.org + function update_play_button(pre_block, playground_crates) { + var play_button = pre_block.querySelector(".play-button"); + + // skip if code is `no_run` + if (pre_block.querySelector('code').classList.contains("no_run")) { + play_button.classList.add("hidden"); + return; + } + + // get list of `extern crate`'s from snippet + var txt = playground_text(pre_block); + var re = /extern\s+crate\s+([a-zA-Z_0-9]+)\s*;/g; + var snippet_crates = []; + var item; + while (item = re.exec(txt)) { + snippet_crates.push(item[1]); + } + + // check if all used crates are available on play.rust-lang.org + var all_available = snippet_crates.every(function (elem) { + return 
playground_crates.indexOf(elem) > -1; + }); + + if (all_available) { + play_button.classList.remove("hidden"); + } else { + play_button.classList.add("hidden"); + } + } + + function run_rust_code(code_block) { + var result_block = code_block.querySelector(".result"); + if (!result_block) { + result_block = document.createElement('code'); + result_block.className = 'result hljs language-bash'; + + code_block.append(result_block); + } + + let text = playground_text(code_block); + let classes = code_block.querySelector('code').classList; + let has_2018 = classes.contains("edition2018"); + let edition = has_2018 ? "2018" : "2015"; + + var params = { + version: "stable", + optimize: "0", + code: text, + edition: edition + }; + + if (text.indexOf("#![feature") !== -1) { + params.version = "nightly"; + } + + result_block.innerText = "Running..."; + + fetch_with_timeout("https://play.rust-lang.org/evaluate.json", { + headers: { + 'Content-Type': "application/json", + }, + method: 'POST', + mode: 'cors', + body: JSON.stringify(params) + }) + .then(response => response.json()) + .then(response => result_block.innerText = response.result) + .catch(error => result_block.innerText = "Playground Communication: " + error.message); + } + + // Syntax highlighting Configuration + hljs.configure({ + tabReplace: ' ', // 4 spaces + languages: [], // Languages used for auto-detection + }); + + let code_nodes = Array + .from(document.querySelectorAll('code')) + // Don't highlight `inline code` blocks in headers. + .filter(function (node) {return !node.parentElement.classList.contains("header"); }); + + if (window.ace) { + // language-rust class needs to be removed for editable + // blocks or highlightjs will capture events + Array + .from(document.querySelectorAll('code.editable')) + .forEach(function (block) { block.classList.remove('language-rust'); }); + + Array + .from(document.querySelectorAll('code:not(.editable)')) + .forEach(function (block) { hljs.highlightBlock(block); }); + } else { + code_nodes.forEach(function (block) { hljs.highlightBlock(block); }); + } + + // Adding the hljs class gives code blocks the color css + // even if highlighting doesn't apply + code_nodes.forEach(function (block) { block.classList.add('hljs'); }); + + Array.from(document.querySelectorAll("code.language-rust")).forEach(function (block) { + + var lines = Array.from(block.querySelectorAll('.boring')); + // If no lines were hidden, return + if (!lines.length) { return; } + block.classList.add("hide-boring"); + + var buttons = document.createElement('div'); + buttons.className = 'buttons'; + buttons.innerHTML = ""; + + // add expand button + var pre_block = block.parentNode; + pre_block.insertBefore(buttons, pre_block.firstChild); + + pre_block.querySelector('.buttons').addEventListener('click', function (e) { + if (e.target.classList.contains('fa-expand')) { + e.target.classList.remove('fa-expand'); + e.target.classList.add('fa-compress'); + e.target.title = 'Hide lines'; + e.target.setAttribute('aria-label', e.target.title); + + block.classList.remove('hide-boring'); + } else if (e.target.classList.contains('fa-compress')) { + e.target.classList.remove('fa-compress'); + e.target.classList.add('fa-expand'); + e.target.title = 'Show hidden lines'; + e.target.setAttribute('aria-label', e.target.title); + + block.classList.add('hide-boring'); + } + }); + }); + + if (window.playground_copyable) { + Array.from(document.querySelectorAll('pre code')).forEach(function (block) { + var pre_block = block.parentNode; + if 
(!pre_block.classList.contains('playground')) { + var buttons = pre_block.querySelector(".buttons"); + if (!buttons) { + buttons = document.createElement('div'); + buttons.className = 'buttons'; + pre_block.insertBefore(buttons, pre_block.firstChild); + } + + var clipButton = document.createElement('button'); + clipButton.className = 'fa fa-copy clip-button'; + clipButton.title = 'Copy to clipboard'; + clipButton.setAttribute('aria-label', clipButton.title); + clipButton.innerHTML = ''; + + buttons.insertBefore(clipButton, buttons.firstChild); + } + }); + } + + // Process playground code blocks + Array.from(document.querySelectorAll(".playground")).forEach(function (pre_block) { + // Add play button + var buttons = pre_block.querySelector(".buttons"); + if (!buttons) { + buttons = document.createElement('div'); + buttons.className = 'buttons'; + pre_block.insertBefore(buttons, pre_block.firstChild); + } + + var runCodeButton = document.createElement('button'); + runCodeButton.className = 'fa fa-play play-button'; + runCodeButton.hidden = true; + runCodeButton.title = 'Run this code'; + runCodeButton.setAttribute('aria-label', runCodeButton.title); + + buttons.insertBefore(runCodeButton, buttons.firstChild); + runCodeButton.addEventListener('click', function (e) { + run_rust_code(pre_block); + }); + + if (window.playground_copyable) { + var copyCodeClipboardButton = document.createElement('button'); + copyCodeClipboardButton.className = 'fa fa-copy clip-button'; + copyCodeClipboardButton.innerHTML = ''; + copyCodeClipboardButton.title = 'Copy to clipboard'; + copyCodeClipboardButton.setAttribute('aria-label', copyCodeClipboardButton.title); + + buttons.insertBefore(copyCodeClipboardButton, buttons.firstChild); + } + + let code_block = pre_block.querySelector("code"); + if (window.ace && code_block.classList.contains("editable")) { + var undoChangesButton = document.createElement('button'); + undoChangesButton.className = 'fa fa-history reset-button'; + undoChangesButton.title = 'Undo changes'; + undoChangesButton.setAttribute('aria-label', undoChangesButton.title); + + buttons.insertBefore(undoChangesButton, buttons.firstChild); + + undoChangesButton.addEventListener('click', function () { + let editor = window.ace.edit(code_block); + editor.setValue(editor.originalCode); + editor.clearSelection(); + }); + } + }); +})(); + +(function themes() { + var html = document.querySelector('html'); + var themeToggleButton = document.getElementById('theme-toggle'); + var themePopup = document.getElementById('theme-list'); + var themeColorMetaTag = document.querySelector('meta[name="theme-color"]'); + var stylesheets = { + ayuHighlight: document.querySelector("[href$='ayu-highlight.css']"), + tomorrowNight: document.querySelector("[href$='tomorrow-night.css']"), + highlight: document.querySelector("[href$='highlight.css']"), + }; + + function showThemes() { + themePopup.style.display = 'block'; + themeToggleButton.setAttribute('aria-expanded', true); + themePopup.querySelector("button#" + get_theme()).focus(); + } + + function hideThemes() { + themePopup.style.display = 'none'; + themeToggleButton.setAttribute('aria-expanded', false); + themeToggleButton.focus(); + } + + function get_theme() { + var theme; + try { theme = localStorage.getItem('mdbook-theme'); } catch (e) { } + if (theme === null || theme === undefined) { + return default_theme; + } else { + return theme; + } + } + + function set_theme(theme, store = true) { + let ace_theme; + + if (theme == 'coal' || theme == 'navy') { + 
stylesheets.ayuHighlight.disabled = true; + stylesheets.tomorrowNight.disabled = false; + stylesheets.highlight.disabled = true; + + ace_theme = "ace/theme/tomorrow_night"; + } else if (theme == 'ayu') { + stylesheets.ayuHighlight.disabled = false; + stylesheets.tomorrowNight.disabled = true; + stylesheets.highlight.disabled = true; + ace_theme = "ace/theme/tomorrow_night"; + } else { + stylesheets.ayuHighlight.disabled = true; + stylesheets.tomorrowNight.disabled = true; + stylesheets.highlight.disabled = false; + ace_theme = "ace/theme/dawn"; + } + + setTimeout(function () { + themeColorMetaTag.content = getComputedStyle(document.body).backgroundColor; + }, 1); + + if (window.ace && window.editors) { + window.editors.forEach(function (editor) { + editor.setTheme(ace_theme); + }); + } + + var previousTheme = get_theme(); + + if (store) { + try { localStorage.setItem('mdbook-theme', theme); } catch (e) { } + } + + html.classList.remove(previousTheme); + html.classList.add(theme); + } + + // Set theme + var theme = get_theme(); + + set_theme(theme, false); + + themeToggleButton.addEventListener('click', function () { + if (themePopup.style.display === 'block') { + hideThemes(); + } else { + showThemes(); + } + }); + + themePopup.addEventListener('click', function (e) { + var theme = e.target.id || e.target.parentElement.id; + set_theme(theme); + }); + + themePopup.addEventListener('focusout', function(e) { + // e.relatedTarget is null in Safari and Firefox on macOS (see workaround below) + if (!!e.relatedTarget && !themeToggleButton.contains(e.relatedTarget) && !themePopup.contains(e.relatedTarget)) { + hideThemes(); + } + }); + + // Should not be needed, but it works around an issue on macOS & iOS: https://github.com/rust-lang/mdBook/issues/628 + document.addEventListener('click', function(e) { + if (themePopup.style.display === 'block' && !themeToggleButton.contains(e.target) && !themePopup.contains(e.target)) { + hideThemes(); + } + }); + + document.addEventListener('keydown', function (e) { + if (e.altKey || e.ctrlKey || e.metaKey || e.shiftKey) { return; } + if (!themePopup.contains(e.target)) { return; } + + switch (e.key) { + case 'Escape': + e.preventDefault(); + hideThemes(); + break; + case 'ArrowUp': + e.preventDefault(); + var li = document.activeElement.parentElement; + if (li && li.previousElementSibling) { + li.previousElementSibling.querySelector('button').focus(); + } + break; + case 'ArrowDown': + e.preventDefault(); + var li = document.activeElement.parentElement; + if (li && li.nextElementSibling) { + li.nextElementSibling.querySelector('button').focus(); + } + break; + case 'Home': + e.preventDefault(); + themePopup.querySelector('li:first-child button').focus(); + break; + case 'End': + e.preventDefault(); + themePopup.querySelector('li:last-child button').focus(); + break; + } + }); +})(); + +(function sidebar() { + var html = document.querySelector("html"); + var sidebar = document.getElementById("sidebar"); + var sidebarLinks = document.querySelectorAll('#sidebar a'); + var sidebarToggleButton = document.getElementById("sidebar-toggle"); + var sidebarResizeHandle = document.getElementById("sidebar-resize-handle"); + var firstContact = null; + + function showSidebar() { + html.classList.remove('sidebar-hidden') + html.classList.add('sidebar-visible'); + Array.from(sidebarLinks).forEach(function (link) { + link.setAttribute('tabIndex', 0); + }); + sidebarToggleButton.setAttribute('aria-expanded', true); + sidebar.setAttribute('aria-hidden', false); + try { 
localStorage.setItem('mdbook-sidebar', 'visible'); } catch (e) { } + } + + + var sidebarAnchorToggles = document.querySelectorAll('#sidebar a.toggle'); + + function toggleSection(ev) { + ev.currentTarget.parentElement.classList.toggle('expanded'); + } + + Array.from(sidebarAnchorToggles).forEach(function (el) { + el.addEventListener('click', toggleSection); + }); + + function hideSidebar() { + html.classList.remove('sidebar-visible') + html.classList.add('sidebar-hidden'); + Array.from(sidebarLinks).forEach(function (link) { + link.setAttribute('tabIndex', -1); + }); + sidebarToggleButton.setAttribute('aria-expanded', false); + sidebar.setAttribute('aria-hidden', true); + try { localStorage.setItem('mdbook-sidebar', 'hidden'); } catch (e) { } + } + + // Toggle sidebar + sidebarToggleButton.addEventListener('click', function sidebarToggle() { + if (html.classList.contains("sidebar-hidden")) { + var current_width = parseInt( + document.documentElement.style.getPropertyValue('--sidebar-width'), 10); + if (current_width < 150) { + document.documentElement.style.setProperty('--sidebar-width', '150px'); + } + showSidebar(); + } else if (html.classList.contains("sidebar-visible")) { + hideSidebar(); + } else { + if (getComputedStyle(sidebar)['transform'] === 'none') { + hideSidebar(); + } else { + showSidebar(); + } + } + }); + + sidebarResizeHandle.addEventListener('mousedown', initResize, false); + + function initResize(e) { + window.addEventListener('mousemove', resize, false); + window.addEventListener('mouseup', stopResize, false); + html.classList.add('sidebar-resizing'); + } + function resize(e) { + var pos = (e.clientX - sidebar.offsetLeft); + if (pos < 20) { + hideSidebar(); + } else { + if (html.classList.contains("sidebar-hidden")) { + showSidebar(); + } + pos = Math.min(pos, window.innerWidth - 100); + document.documentElement.style.setProperty('--sidebar-width', pos + 'px'); + } + } + //on mouseup remove windows functions mousemove & mouseup + function stopResize(e) { + html.classList.remove('sidebar-resizing'); + window.removeEventListener('mousemove', resize, false); + window.removeEventListener('mouseup', stopResize, false); + } + + document.addEventListener('touchstart', function (e) { + firstContact = { + x: e.touches[0].clientX, + time: Date.now() + }; + }, { passive: true }); + + document.addEventListener('touchmove', function (e) { + if (!firstContact) + return; + + var curX = e.touches[0].clientX; + var xDiff = curX - firstContact.x, + tDiff = Date.now() - firstContact.time; + + if (tDiff < 250 && Math.abs(xDiff) >= 150) { + if (xDiff >= 0 && firstContact.x < Math.min(document.body.clientWidth * 0.25, 300)) + showSidebar(); + else if (xDiff < 0 && curX < 300) + hideSidebar(); + + firstContact = null; + } + }, { passive: true }); + + // Scroll sidebar to current active section + var activeSection = document.getElementById("sidebar").querySelector(".active"); + if (activeSection) { + // https://developer.mozilla.org/en-US/docs/Web/API/Element/scrollIntoView + activeSection.scrollIntoView({ block: 'center' }); + } +})(); + +(function chapterNavigation() { + document.addEventListener('keydown', function (e) { + if (e.altKey || e.ctrlKey || e.metaKey || e.shiftKey) { return; } + if (window.search && window.search.hasFocus()) { return; } + + switch (e.key) { + case 'ArrowRight': + e.preventDefault(); + var nextButton = document.querySelector('.nav-chapters.next'); + if (nextButton) { + window.location.href = nextButton.href; + } + break; + case 'ArrowLeft': + e.preventDefault(); + 
var previousButton = document.querySelector('.nav-chapters.previous'); + if (previousButton) { + window.location.href = previousButton.href; + } + break; + } + }); +})(); + +(function clipboard() { + var clipButtons = document.querySelectorAll('.clip-button'); + + function hideTooltip(elem) { + elem.firstChild.innerText = ""; + elem.className = 'fa fa-copy clip-button'; + } + + function showTooltip(elem, msg) { + elem.firstChild.innerText = msg; + elem.className = 'fa fa-copy tooltipped'; + } + + var clipboardSnippets = new ClipboardJS('.clip-button', { + text: function (trigger) { + hideTooltip(trigger); + let playground = trigger.closest("pre"); + return playground_text(playground); + } + }); + + Array.from(clipButtons).forEach(function (clipButton) { + clipButton.addEventListener('mouseout', function (e) { + hideTooltip(e.currentTarget); + }); + }); + + clipboardSnippets.on('success', function (e) { + e.clearSelection(); + showTooltip(e.trigger, "Copied!"); + }); + + clipboardSnippets.on('error', function (e) { + showTooltip(e.trigger, "Clipboard error!"); + }); +})(); + +(function scrollToTop () { + var menuTitle = document.querySelector('.menu-title'); + + menuTitle.addEventListener('click', function () { + document.scrollingElement.scrollTo({ top: 0, behavior: 'smooth' }); + }); +})(); + +(function controllMenu() { + var menu = document.getElementById('menu-bar'); + + (function controllPosition() { + var scrollTop = document.scrollingElement.scrollTop; + var prevScrollTop = scrollTop; + var minMenuY = -menu.clientHeight - 50; + // When the script loads, the page can be at any scroll (e.g. if you reforesh it). + menu.style.top = scrollTop + 'px'; + // Same as parseInt(menu.style.top.slice(0, -2), but faster + var topCache = menu.style.top.slice(0, -2); + menu.classList.remove('sticky'); + var stickyCache = false; // Same as menu.classList.contains('sticky'), but faster + document.addEventListener('scroll', function () { + scrollTop = Math.max(document.scrollingElement.scrollTop, 0); + // `null` means that it doesn't need to be updated + var nextSticky = null; + var nextTop = null; + var scrollDown = scrollTop > prevScrollTop; + var menuPosAbsoluteY = topCache - scrollTop; + if (scrollDown) { + nextSticky = false; + if (menuPosAbsoluteY > 0) { + nextTop = prevScrollTop; + } + } else { + if (menuPosAbsoluteY > 0) { + nextSticky = true; + } else if (menuPosAbsoluteY < minMenuY) { + nextTop = prevScrollTop + minMenuY; + } + } + if (nextSticky === true && stickyCache === false) { + menu.classList.add('sticky'); + stickyCache = true; + } else if (nextSticky === false && stickyCache === true) { + menu.classList.remove('sticky'); + stickyCache = false; + } + if (nextTop !== null) { + menu.style.top = nextTop + 'px'; + topCache = nextTop; + } + prevScrollTop = scrollTop; + }, { passive: true }); + })(); + (function controllBorder() { + menu.classList.remove('bordered'); + document.addEventListener('scroll', function () { + if (menu.offsetTop === 0) { + menu.classList.remove('bordered'); + } else { + menu.classList.add('bordered'); + } + }, { passive: true }); + })(); +})(); diff --git a/perf-guide/bound_checks.html b/perf-guide/bound_checks.html new file mode 100644 index 000000000..32a350839 --- /dev/null +++ b/perf-guide/bound_checks.html @@ -0,0 +1,246 @@ + + + + + + Bounds checking - Rust SIMD Performance Guide + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + + + + +
+
+

Bounds checking

+

Reading and writing packed vectors to/from slices is checked by default. Independently of the configuration options used, the safe functions:

+
    +
  • Simd<[T; N]>::from_slice_aligned(&s[..])
  • +
  • Simd<[T; N]>::write_to_slice_aligned(&mut s[..])
  • +
+

always check that:

+
    +
  • the slice is big enough to hold the vector
  • +
  • the slice is suitably aligned to perform an aligned load/store for a Simd<[T; N]> (this alignment is often much larger than that of T).
  • +
+

There are _unaligned versions that use unaligned loads and stores, as well as unsafe _unchecked versions that do not perform any checks when debug-assertions = false / debug = false. That is, the _unchecked methods do still assert size and alignment in debug builds, and could also do so in release builds depending on the configuration options.

+

These assertions often have a significant performance impact, and you should be aware of them.
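A minimal sketch of these trade-offs, assuming the packed_simd crate and its f32x8 type with the from_slice_* family described above (exact signatures may differ between crate versions):

    // Sketch only: assumes the packed_simd crate's f32x8 type.
    use packed_simd::f32x8;

    fn sum(s: &[f32]) -> f32 {
        // One up-front size check for the whole loop.
        assert_eq!(s.len() % 8, 0);
        let mut acc = f32x8::splat(0.);
        for chunk in s.chunks_exact(8) {
            // Safe and always checked: panics if `chunk` holds fewer than
            // 8 elements. The *_aligned variant would additionally panic
            // unless `chunk` meets f32x8's alignment, which is larger than
            // that of f32.
            acc += f32x8::from_slice_unaligned(chunk);

            // Unsafe alternative: size and alignment are asserted only in
            // builds with debug-assertions enabled, so release builds skip
            // the per-iteration check entirely.
            // acc += unsafe { f32x8::from_slice_unaligned_unchecked(chunk) };
        }
        acc.sum()
    }

In hot loops, hoisting a single length check before the loop (as the assert above does) often lets the optimizer elide the per-iteration checks of the safe variants, making the unsafe _unchecked versions unnecessary.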

+ +
+ + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/perf-guide/clipboard.min.js b/perf-guide/clipboard.min.js new file mode 100644 index 000000000..02c549e35 --- /dev/null +++ b/perf-guide/clipboard.min.js @@ -0,0 +1,7 @@ +/*! + * clipboard.js v2.0.4 + * https://zenorocha.github.io/clipboard.js + * + * Licensed MIT © Zeno Rocha + */ +!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.ClipboardJS=e():t.ClipboardJS=e()}(this,function(){return function(n){var o={};function r(t){if(o[t])return o[t].exports;var e=o[t]={i:t,l:!1,exports:{}};return n[t].call(e.exports,e,e.exports,r),e.l=!0,e.exports}return r.m=n,r.c=o,r.d=function(t,e,n){r.o(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:n})},r.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},r.t=function(e,t){if(1&t&&(e=r(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(r.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)r.d(n,o,function(t){return e[t]}.bind(null,o));return n},r.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return r.d(e,"a",e),e},r.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},r.p="",r(r.s=0)}([function(t,e,n){"use strict";var r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},i=function(){function o(t,e){for(var n=0;n .hljs { + color: var(--links); +} + +/* Menu Bar */ + +#menu-bar, +#menu-bar-hover-placeholder { + z-index: 101; + margin: auto calc(0px - var(--page-padding)); +} +#menu-bar { + position: relative; + display: flex; + flex-wrap: wrap; + background-color: var(--bg); + border-bottom-color: var(--bg); + border-bottom-width: 1px; + border-bottom-style: solid; +} +#menu-bar.sticky, +.js #menu-bar-hover-placeholder:hover + #menu-bar, +.js #menu-bar:hover, +.js.sidebar-visible #menu-bar { + position: -webkit-sticky; + position: sticky; + top: 0 !important; +} +#menu-bar-hover-placeholder { + position: sticky; + position: -webkit-sticky; + top: 0; + height: var(--menu-bar-height); +} +#menu-bar.bordered { + border-bottom-color: var(--table-border-color); +} +#menu-bar i, #menu-bar .icon-button { + position: relative; + padding: 0 8px; + z-index: 10; + line-height: var(--menu-bar-height); + cursor: pointer; + transition: color 0.5s; +} +@media only screen and (max-width: 420px) { + #menu-bar i, #menu-bar .icon-button { + padding: 0 5px; + } +} + +.icon-button { + border: none; + background: none; + padding: 0; + color: inherit; +} +.icon-button i { + margin: 0; +} + +.right-buttons { + margin: 0 15px; +} +.right-buttons a { + text-decoration: none; +} + +.left-buttons { + display: flex; + margin: 0 5px; +} +.no-js .left-buttons { + display: none; +} + +.menu-title { + display: inline-block; + font-weight: 200; + font-size: 2rem; + line-height: var(--menu-bar-height); + text-align: center; + margin: 0; + flex: 1; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} +.js .menu-title { + cursor: pointer; +} + +.menu-bar, +.menu-bar:visited, +.nav-chapters, +.nav-chapters:visited, +.mobile-nav-chapters, +.mobile-nav-chapters:visited, 
+.menu-bar .icon-button, +.menu-bar a i { + color: var(--icons); +} + +.menu-bar i:hover, +.menu-bar .icon-button:hover, +.nav-chapters:hover, +.mobile-nav-chapters i:hover { + color: var(--icons-hover); +} + +/* Nav Icons */ + +.nav-chapters { + font-size: 2.5em; + text-align: center; + text-decoration: none; + + position: fixed; + top: 0; + bottom: 0; + margin: 0; + max-width: 150px; + min-width: 90px; + + display: flex; + justify-content: center; + align-content: center; + flex-direction: column; + + transition: color 0.5s, background-color 0.5s; +} + +.nav-chapters:hover { + text-decoration: none; + background-color: var(--theme-hover); + transition: background-color 0.15s, color 0.15s; +} + +.nav-wrapper { + margin-top: 50px; + display: none; +} + +.mobile-nav-chapters { + font-size: 2.5em; + text-align: center; + text-decoration: none; + width: 90px; + border-radius: 5px; + background-color: var(--sidebar-bg); +} + +.previous { + float: left; +} + +.next { + float: right; + right: var(--page-padding); +} + +@media only screen and (max-width: 1080px) { + .nav-wide-wrapper { display: none; } + .nav-wrapper { display: block; } +} + +@media only screen and (max-width: 1380px) { + .sidebar-visible .nav-wide-wrapper { display: none; } + .sidebar-visible .nav-wrapper { display: block; } +} + +/* Inline code */ + +:not(pre) > .hljs { + display: inline; + padding: 0.1em 0.3em; + border-radius: 3px; +} + +:not(pre):not(a) > .hljs { + color: var(--inline-code-color); + overflow-x: initial; +} + +a:hover > .hljs { + text-decoration: underline; +} + +pre { + position: relative; +} +pre > .buttons { + position: absolute; + z-index: 100; + right: 5px; + top: 5px; + + color: var(--sidebar-fg); + cursor: pointer; +} +pre > .buttons :hover { + color: var(--sidebar-active); +} +pre > .buttons i { + margin-left: 8px; +} +pre > .buttons button { + color: inherit; + background: transparent; + border: none; + cursor: inherit; +} +pre > .result { + margin-top: 10px; +} + +/* Search */ + +#searchresults a { + text-decoration: none; +} + +mark { + border-radius: 2px; + padding: 0 3px 1px 3px; + margin: 0 -3px -1px -3px; + background-color: var(--search-mark-bg); + transition: background-color 300ms linear; + cursor: pointer; +} + +mark.fade-out { + background-color: rgba(0,0,0,0) !important; + cursor: auto; +} + +.searchbar-outer { + margin-left: auto; + margin-right: auto; + max-width: var(--content-max-width); +} + +#searchbar { + width: 100%; + margin: 5px auto 0px auto; + padding: 10px 16px; + transition: box-shadow 300ms ease-in-out; + border: 1px solid var(--searchbar-border-color); + border-radius: 3px; + background-color: var(--searchbar-bg); + color: var(--searchbar-fg); +} +#searchbar:focus, +#searchbar.active { + box-shadow: 0 0 3px var(--searchbar-shadow-color); +} + +.searchresults-header { + font-weight: bold; + font-size: 1em; + padding: 18px 0 0 5px; + color: var(--searchresults-header-fg); +} + +.searchresults-outer { + margin-left: auto; + margin-right: auto; + max-width: var(--content-max-width); + border-bottom: 1px dashed var(--searchresults-border-color); +} + +ul#searchresults { + list-style: none; + padding-left: 20px; +} +ul#searchresults li { + margin: 10px 0px; + padding: 2px; + border-radius: 2px; +} +ul#searchresults li.focus { + background-color: var(--searchresults-li-bg); +} +ul#searchresults span.teaser { + display: block; + clear: both; + margin: 5px 0 0 20px; + font-size: 0.8em; +} +ul#searchresults span.teaser em { + font-weight: bold; + font-style: normal; +} + +/* Sidebar 
*/ + +.sidebar { + position: fixed; + left: 0; + top: 0; + bottom: 0; + width: var(--sidebar-width); + font-size: 0.875em; + box-sizing: border-box; + -webkit-overflow-scrolling: touch; + overscroll-behavior-y: contain; + background-color: var(--sidebar-bg); + color: var(--sidebar-fg); +} +.sidebar-resizing { + -moz-user-select: none; + -webkit-user-select: none; + -ms-user-select: none; + user-select: none; +} +.js:not(.sidebar-resizing) .sidebar { + transition: transform 0.3s; /* Animation: slide away */ +} +.sidebar code { + line-height: 2em; +} +.sidebar .sidebar-scrollbox { + overflow-y: auto; + position: absolute; + top: 0; + bottom: 0; + left: 0; + right: 0; + padding: 10px 10px; +} +.sidebar .sidebar-resize-handle { + position: absolute; + cursor: col-resize; + width: 0; + right: 0; + top: 0; + bottom: 0; +} +.js .sidebar .sidebar-resize-handle { + cursor: col-resize; + width: 5px; +} +.sidebar-hidden .sidebar { + transform: translateX(calc(0px - var(--sidebar-width))); +} +.sidebar::-webkit-scrollbar { + background: var(--sidebar-bg); +} +.sidebar::-webkit-scrollbar-thumb { + background: var(--scrollbar); +} + +.sidebar-visible .page-wrapper { + transform: translateX(var(--sidebar-width)); +} +@media only screen and (min-width: 620px) { + .sidebar-visible .page-wrapper { + transform: none; + margin-left: var(--sidebar-width); + } +} + +.chapter { + list-style: none outside none; + padding-left: 0; + line-height: 2.2em; +} + +.chapter ol { + width: 100%; +} + +.chapter li { + display: flex; + color: var(--sidebar-non-existant); +} +.chapter li a { + display: block; + padding: 0; + text-decoration: none; + color: var(--sidebar-fg); +} + +.chapter li a:hover { + color: var(--sidebar-active); +} + +.chapter li a.active { + color: var(--sidebar-active); +} + +.chapter li > a.toggle { + cursor: pointer; + display: block; + margin-left: auto; + padding: 0 10px; + user-select: none; + opacity: 0.68; +} + +.chapter li > a.toggle div { + transition: transform 0.5s; +} + +/* collapse the section */ +.chapter li:not(.expanded) + li > ol { + display: none; +} + +.chapter li.chapter-item { + line-height: 1.5em; + margin-top: 0.6em; +} + +.chapter li.expanded > a.toggle div { + transform: rotate(90deg); +} + +.spacer { + width: 100%; + height: 3px; + margin: 5px 0px; +} +.chapter .spacer { + background-color: var(--sidebar-spacer); +} + +@media (-moz-touch-enabled: 1), (pointer: coarse) { + .chapter li a { padding: 5px 0; } + .spacer { margin: 10px 0; } +} + +.section { + list-style: none outside none; + padding-left: 20px; + line-height: 1.9em; +} + +/* Theme Menu Popup */ + +.theme-popup { + position: absolute; + left: 10px; + top: var(--menu-bar-height); + z-index: 1000; + border-radius: 4px; + font-size: 0.7em; + color: var(--fg); + background: var(--theme-popup-bg); + border: 1px solid var(--theme-popup-border); + margin: 0; + padding: 0; + list-style: none; + display: none; +} +.theme-popup .default { + color: var(--icons); +} +.theme-popup .theme { + width: 100%; + border: 0; + margin: 0; + padding: 2px 10px; + line-height: 25px; + white-space: nowrap; + text-align: left; + cursor: pointer; + color: inherit; + background: inherit; + font-size: inherit; +} +.theme-popup .theme:hover { + background-color: var(--theme-hover); +} +.theme-popup .theme:hover:first-child, +.theme-popup .theme:hover:last-child { + border-top-left-radius: inherit; + border-top-right-radius: inherit; +} diff --git a/perf-guide/css/general.css b/perf-guide/css/general.css new file mode 100644 index 
000000000..815dae1ad --- /dev/null +++ b/perf-guide/css/general.css @@ -0,0 +1,174 @@ +/* Base styles and content styles */ + +@import 'variables.css'; + +:root { + /* Browser default font-size is 16px, this way 1 rem = 10px */ + font-size: 62.5%; +} + +html { + font-family: "Open Sans", sans-serif; + color: var(--fg); + background-color: var(--bg); + text-size-adjust: none; +} + +body { + margin: 0; + font-size: 1.6rem; + overflow-x: hidden; +} + +code { + font-family: "Source Code Pro", Consolas, "Ubuntu Mono", Menlo, "DejaVu Sans Mono", monospace, monospace !important; + font-size: 0.875em; /* please adjust the ace font size accordingly in editor.js */ +} + +/* Don't change font size in headers. */ +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + font-size: unset; +} + +.left { float: left; } +.right { float: right; } +.boring { opacity: 0.6; } +.hide-boring .boring { display: none; } +.hidden { display: none !important; } + +h2, h3 { margin-top: 2.5em; } +h4, h5 { margin-top: 2em; } + +.header + .header h3, +.header + .header h4, +.header + .header h5 { + margin-top: 1em; +} + +h1 a.header:target::before, +h2 a.header:target::before, +h3 a.header:target::before, +h4 a.header:target::before { + display: inline-block; + content: "»"; + margin-left: -30px; + width: 30px; +} + +h1 a.header:target, +h2 a.header:target, +h3 a.header:target, +h4 a.header:target { + scroll-margin-top: calc(var(--menu-bar-height) + 0.5em); +} + +.page { + outline: 0; + padding: 0 var(--page-padding); + margin-top: calc(0px - var(--menu-bar-height)); /* Compensate for the #menu-bar-hover-placeholder */ +} +.page-wrapper { + box-sizing: border-box; +} +.js:not(.sidebar-resizing) .page-wrapper { + transition: margin-left 0.3s ease, transform 0.3s ease; /* Animation: slide away */ +} + +.content { + overflow-y: auto; + padding: 0 15px; + padding-bottom: 50px; +} +.content main { + margin-left: auto; + margin-right: auto; + max-width: var(--content-max-width); +} +.content p { line-height: 1.45em; } +.content ol { line-height: 1.45em; } +.content ul { line-height: 1.45em; } +.content a { text-decoration: none; } +.content a:hover { text-decoration: underline; } +.content img { max-width: 100%; } +.content .header:link, +.content .header:visited { + color: var(--fg); +} +.content .header:link, +.content .header:visited:hover { + text-decoration: none; +} + +table { + margin: 0 auto; + border-collapse: collapse; +} +table td { + padding: 3px 20px; + border: 1px var(--table-border-color) solid; +} +table thead { + background: var(--table-header-bg); +} +table thead td { + font-weight: 700; + border: none; +} +table thead th { + padding: 3px 20px; +} +table thead tr { + border: 1px var(--table-header-bg) solid; +} +/* Alternate background colors for rows */ +table tbody tr:nth-child(2n) { + background: var(--table-alternate-bg); +} + + +blockquote { + margin: 20px 0; + padding: 0 20px; + color: var(--fg); + background-color: var(--quote-bg); + border-top: .1em solid var(--quote-border); + border-bottom: .1em solid var(--quote-border); +} + + +:not(.footnote-definition) + .footnote-definition, +.footnote-definition + :not(.footnote-definition) { + margin-top: 2em; +} +.footnote-definition { + font-size: 0.9em; + margin: 0.5em 0; +} +.footnote-definition p { + display: inline; +} + +.tooltiptext { + position: absolute; + visibility: hidden; + color: #fff; + background-color: #333; + transform: translateX(-50%); /* Center by moving tooltip 50% of its width left */ + left: -8px; /* Half of the width of the icon */ 
+ top: -35px; + font-size: 0.8em; + text-align: center; + border-radius: 6px; + padding: 5px 8px; + margin: 5px; + z-index: 1000; +} +.tooltipped .tooltiptext { + visibility: visible; +} + +.chapter li.part-title { + color: var(--sidebar-fg); + margin: 5px 0px; + font-weight: bold; +} diff --git a/perf-guide/css/print.css b/perf-guide/css/print.css new file mode 100644 index 000000000..5e690f755 --- /dev/null +++ b/perf-guide/css/print.css @@ -0,0 +1,54 @@ + +#sidebar, +#menu-bar, +.nav-chapters, +.mobile-nav-chapters { + display: none; +} + +#page-wrapper.page-wrapper { + transform: none; + margin-left: 0px; + overflow-y: initial; +} + +#content { + max-width: none; + margin: 0; + padding: 0; +} + +.page { + overflow-y: initial; +} + +code { + background-color: #666666; + border-radius: 5px; + + /* Force background to be printed in Chrome */ + -webkit-print-color-adjust: exact; +} + +pre > .buttons { + z-index: 2; +} + +a, a:visited, a:active, a:hover { + color: #4183c4; + text-decoration: none; +} + +h1, h2, h3, h4, h5, h6 { + page-break-inside: avoid; + page-break-after: avoid; +} + +pre, code { + page-break-inside: avoid; + white-space: pre-wrap; +} + +.fa { + display: none !important; +} diff --git a/perf-guide/css/variables.css b/perf-guide/css/variables.css new file mode 100644 index 000000000..9534ec8d1 --- /dev/null +++ b/perf-guide/css/variables.css @@ -0,0 +1,253 @@ + +/* Globals */ + +:root { + --sidebar-width: 300px; + --page-padding: 15px; + --content-max-width: 750px; + --menu-bar-height: 50px; +} + +/* Themes */ + +.ayu { + --bg: hsl(210, 25%, 8%); + --fg: #c5c5c5; + + --sidebar-bg: #14191f; + --sidebar-fg: #c8c9db; + --sidebar-non-existant: #5c6773; + --sidebar-active: #ffb454; + --sidebar-spacer: #2d334f; + + --scrollbar: var(--sidebar-fg); + + --icons: #737480; + --icons-hover: #b7b9cc; + + --links: #0096cf; + + --inline-code-color: #ffb454; + + --theme-popup-bg: #14191f; + --theme-popup-border: #5c6773; + --theme-hover: #191f26; + + --quote-bg: hsl(226, 15%, 17%); + --quote-border: hsl(226, 15%, 22%); + + --table-border-color: hsl(210, 25%, 13%); + --table-header-bg: hsl(210, 25%, 28%); + --table-alternate-bg: hsl(210, 25%, 11%); + + --searchbar-border-color: #848484; + --searchbar-bg: #424242; + --searchbar-fg: #fff; + --searchbar-shadow-color: #d4c89f; + --searchresults-header-fg: #666; + --searchresults-border-color: #888; + --searchresults-li-bg: #252932; + --search-mark-bg: #e3b171; +} + +.coal { + --bg: hsl(200, 7%, 8%); + --fg: #98a3ad; + + --sidebar-bg: #292c2f; + --sidebar-fg: #a1adb8; + --sidebar-non-existant: #505254; + --sidebar-active: #3473ad; + --sidebar-spacer: #393939; + + --scrollbar: var(--sidebar-fg); + + --icons: #43484d; + --icons-hover: #b3c0cc; + + --links: #2b79a2; + + --inline-code-color: #c5c8c6;; + + --theme-popup-bg: #141617; + --theme-popup-border: #43484d; + --theme-hover: #1f2124; + + --quote-bg: hsl(234, 21%, 18%); + --quote-border: hsl(234, 21%, 23%); + + --table-border-color: hsl(200, 7%, 13%); + --table-header-bg: hsl(200, 7%, 28%); + --table-alternate-bg: hsl(200, 7%, 11%); + + --searchbar-border-color: #aaa; + --searchbar-bg: #b7b7b7; + --searchbar-fg: #000; + --searchbar-shadow-color: #aaa; + --searchresults-header-fg: #666; + --searchresults-border-color: #98a3ad; + --searchresults-li-bg: #2b2b2f; + --search-mark-bg: #355c7d; +} + +.light { + --bg: hsl(0, 0%, 100%); + --fg: #333333; + + --sidebar-bg: #fafafa; + --sidebar-fg: #364149; + --sidebar-non-existant: #aaaaaa; + --sidebar-active: #008cff; + --sidebar-spacer: #f4f4f4; + + 
--scrollbar: #cccccc; + + --icons: #cccccc; + --icons-hover: #333333; + + --links: #4183c4; + + --inline-code-color: #6e6b5e; + + --theme-popup-bg: #fafafa; + --theme-popup-border: #cccccc; + --theme-hover: #e6e6e6; + + --quote-bg: hsl(197, 37%, 96%); + --quote-border: hsl(197, 37%, 91%); + + --table-border-color: hsl(0, 0%, 95%); + --table-header-bg: hsl(0, 0%, 80%); + --table-alternate-bg: hsl(0, 0%, 97%); + + --searchbar-border-color: #aaa; + --searchbar-bg: #fafafa; + --searchbar-fg: #000; + --searchbar-shadow-color: #aaa; + --searchresults-header-fg: #666; + --searchresults-border-color: #888; + --searchresults-li-bg: #e4f2fe; + --search-mark-bg: #a2cff5; +} + +.navy { + --bg: hsl(226, 23%, 11%); + --fg: #bcbdd0; + + --sidebar-bg: #282d3f; + --sidebar-fg: #c8c9db; + --sidebar-non-existant: #505274; + --sidebar-active: #2b79a2; + --sidebar-spacer: #2d334f; + + --scrollbar: var(--sidebar-fg); + + --icons: #737480; + --icons-hover: #b7b9cc; + + --links: #2b79a2; + + --inline-code-color: #c5c8c6;; + + --theme-popup-bg: #161923; + --theme-popup-border: #737480; + --theme-hover: #282e40; + + --quote-bg: hsl(226, 15%, 17%); + --quote-border: hsl(226, 15%, 22%); + + --table-border-color: hsl(226, 23%, 16%); + --table-header-bg: hsl(226, 23%, 31%); + --table-alternate-bg: hsl(226, 23%, 14%); + + --searchbar-border-color: #aaa; + --searchbar-bg: #aeaec6; + --searchbar-fg: #000; + --searchbar-shadow-color: #aaa; + --searchresults-header-fg: #5f5f71; + --searchresults-border-color: #5c5c68; + --searchresults-li-bg: #242430; + --search-mark-bg: #a2cff5; +} + +.rust { + --bg: hsl(60, 9%, 87%); + --fg: #262625; + + --sidebar-bg: #3b2e2a; + --sidebar-fg: #c8c9db; + --sidebar-non-existant: #505254; + --sidebar-active: #e69f67; + --sidebar-spacer: #45373a; + + --scrollbar: var(--sidebar-fg); + + --icons: #737480; + --icons-hover: #262625; + + --links: #2b79a2; + + --inline-code-color: #6e6b5e; + + --theme-popup-bg: #e1e1db; + --theme-popup-border: #b38f6b; + --theme-hover: #99908a; + + --quote-bg: hsl(60, 5%, 75%); + --quote-border: hsl(60, 5%, 70%); + + --table-border-color: hsl(60, 9%, 82%); + --table-header-bg: #b3a497; + --table-alternate-bg: hsl(60, 9%, 84%); + + --searchbar-border-color: #aaa; + --searchbar-bg: #fafafa; + --searchbar-fg: #000; + --searchbar-shadow-color: #aaa; + --searchresults-header-fg: #666; + --searchresults-border-color: #888; + --searchresults-li-bg: #dec2a2; + --search-mark-bg: #e69f67; +} + +@media (prefers-color-scheme: dark) { + .light.no-js { + --bg: hsl(200, 7%, 8%); + --fg: #98a3ad; + + --sidebar-bg: #292c2f; + --sidebar-fg: #a1adb8; + --sidebar-non-existant: #505254; + --sidebar-active: #3473ad; + --sidebar-spacer: #393939; + + --scrollbar: var(--sidebar-fg); + + --icons: #43484d; + --icons-hover: #b3c0cc; + + --links: #2b79a2; + + --inline-code-color: #c5c8c6;; + + --theme-popup-bg: #141617; + --theme-popup-border: #43484d; + --theme-hover: #1f2124; + + --quote-bg: hsl(234, 21%, 18%); + --quote-border: hsl(234, 21%, 23%); + + --table-border-color: hsl(200, 7%, 13%); + --table-header-bg: hsl(200, 7%, 28%); + --table-alternate-bg: hsl(200, 7%, 11%); + + --searchbar-border-color: #aaa; + --searchbar-bg: #b7b7b7; + --searchbar-fg: #000; + --searchbar-shadow-color: #aaa; + --searchresults-header-fg: #666; + --searchresults-border-color: #98a3ad; + --searchresults-li-bg: #2b2b2f; + --search-mark-bg: #355c7d; + } +} diff --git a/perf-guide/elasticlunr.min.js b/perf-guide/elasticlunr.min.js new file mode 100644 index 000000000..94b20dd2e --- /dev/null +++ 
b/perf-guide/elasticlunr.min.js @@ -0,0 +1,10 @@ +/** + * elasticlunr - http://weixsong.github.io + * Lightweight full-text search engine in Javascript for browser search and offline search. - 0.9.5 + * + * Copyright (C) 2017 Oliver Nightingale + * Copyright (C) 2017 Wei Song + * MIT Licensed + * @license + */ +!function(){function e(e){if(null===e||"object"!=typeof e)return e;var t=e.constructor();for(var n in e)e.hasOwnProperty(n)&&(t[n]=e[n]);return t}var t=function(e){var n=new t.Index;return n.pipeline.add(t.trimmer,t.stopWordFilter,t.stemmer),e&&e.call(n,n),n};t.version="0.9.5",lunr=t,t.utils={},t.utils.warn=function(e){return function(t){e.console&&console.warn&&console.warn(t)}}(this),t.utils.toString=function(e){return void 0===e||null===e?"":e.toString()},t.EventEmitter=function(){this.events={}},t.EventEmitter.prototype.addListener=function(){var e=Array.prototype.slice.call(arguments),t=e.pop(),n=e;if("function"!=typeof t)throw new TypeError("last argument must be a function");n.forEach(function(e){this.hasHandler(e)||(this.events[e]=[]),this.events[e].push(t)},this)},t.EventEmitter.prototype.removeListener=function(e,t){if(this.hasHandler(e)){var n=this.events[e].indexOf(t);-1!==n&&(this.events[e].splice(n,1),0==this.events[e].length&&delete this.events[e])}},t.EventEmitter.prototype.emit=function(e){if(this.hasHandler(e)){var t=Array.prototype.slice.call(arguments,1);this.events[e].forEach(function(e){e.apply(void 0,t)},this)}},t.EventEmitter.prototype.hasHandler=function(e){return e in this.events},t.tokenizer=function(e){if(!arguments.length||null===e||void 0===e)return[];if(Array.isArray(e)){var n=e.filter(function(e){return null===e||void 0===e?!1:!0});n=n.map(function(e){return t.utils.toString(e).toLowerCase()});var i=[];return n.forEach(function(e){var n=e.split(t.tokenizer.seperator);i=i.concat(n)},this),i}return e.toString().trim().toLowerCase().split(t.tokenizer.seperator)},t.tokenizer.defaultSeperator=/[\s\-]+/,t.tokenizer.seperator=t.tokenizer.defaultSeperator,t.tokenizer.setSeperator=function(e){null!==e&&void 0!==e&&"object"==typeof e&&(t.tokenizer.seperator=e)},t.tokenizer.resetSeperator=function(){t.tokenizer.seperator=t.tokenizer.defaultSeperator},t.tokenizer.getSeperator=function(){return t.tokenizer.seperator},t.Pipeline=function(){this._queue=[]},t.Pipeline.registeredFunctions={},t.Pipeline.registerFunction=function(e,n){n in t.Pipeline.registeredFunctions&&t.utils.warn("Overwriting existing registered function: "+n),e.label=n,t.Pipeline.registeredFunctions[n]=e},t.Pipeline.getRegisteredFunction=function(e){return e in t.Pipeline.registeredFunctions!=!0?null:t.Pipeline.registeredFunctions[e]},t.Pipeline.warnIfFunctionNotRegistered=function(e){var n=e.label&&e.label in this.registeredFunctions;n||t.utils.warn("Function is not registered with pipeline. 
This may cause problems when serialising the index.\n",e)},t.Pipeline.load=function(e){var n=new t.Pipeline;return e.forEach(function(e){var i=t.Pipeline.getRegisteredFunction(e);if(!i)throw new Error("Cannot load un-registered function: "+e);n.add(i)}),n},t.Pipeline.prototype.add=function(){var e=Array.prototype.slice.call(arguments);e.forEach(function(e){t.Pipeline.warnIfFunctionNotRegistered(e),this._queue.push(e)},this)},t.Pipeline.prototype.after=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var i=this._queue.indexOf(e);if(-1===i)throw new Error("Cannot find existingFn");this._queue.splice(i+1,0,n)},t.Pipeline.prototype.before=function(e,n){t.Pipeline.warnIfFunctionNotRegistered(n);var i=this._queue.indexOf(e);if(-1===i)throw new Error("Cannot find existingFn");this._queue.splice(i,0,n)},t.Pipeline.prototype.remove=function(e){var t=this._queue.indexOf(e);-1!==t&&this._queue.splice(t,1)},t.Pipeline.prototype.run=function(e){for(var t=[],n=e.length,i=this._queue.length,o=0;n>o;o++){for(var r=e[o],s=0;i>s&&(r=this._queue[s](r,o,e),void 0!==r&&null!==r);s++);void 0!==r&&null!==r&&t.push(r)}return t},t.Pipeline.prototype.reset=function(){this._queue=[]},t.Pipeline.prototype.get=function(){return this._queue},t.Pipeline.prototype.toJSON=function(){return this._queue.map(function(e){return t.Pipeline.warnIfFunctionNotRegistered(e),e.label})},t.Index=function(){this._fields=[],this._ref="id",this.pipeline=new t.Pipeline,this.documentStore=new t.DocumentStore,this.index={},this.eventEmitter=new t.EventEmitter,this._idfCache={},this.on("add","remove","update",function(){this._idfCache={}}.bind(this))},t.Index.prototype.on=function(){var e=Array.prototype.slice.call(arguments);return this.eventEmitter.addListener.apply(this.eventEmitter,e)},t.Index.prototype.off=function(e,t){return this.eventEmitter.removeListener(e,t)},t.Index.load=function(e){e.version!==t.version&&t.utils.warn("version mismatch: current "+t.version+" importing "+e.version);var n=new this;n._fields=e.fields,n._ref=e.ref,n.documentStore=t.DocumentStore.load(e.documentStore),n.pipeline=t.Pipeline.load(e.pipeline),n.index={};for(var i in e.index)n.index[i]=t.InvertedIndex.load(e.index[i]);return n},t.Index.prototype.addField=function(e){return this._fields.push(e),this.index[e]=new t.InvertedIndex,this},t.Index.prototype.setRef=function(e){return this._ref=e,this},t.Index.prototype.saveDocument=function(e){return this.documentStore=new t.DocumentStore(e),this},t.Index.prototype.addDoc=function(e,n){if(e){var n=void 0===n?!0:n,i=e[this._ref];this.documentStore.addDoc(i,e),this._fields.forEach(function(n){var o=this.pipeline.run(t.tokenizer(e[n]));this.documentStore.addFieldLength(i,n,o.length);var r={};o.forEach(function(e){e in r?r[e]+=1:r[e]=1},this);for(var s in r){var u=r[s];u=Math.sqrt(u),this.index[n].addToken(s,{ref:i,tf:u})}},this),n&&this.eventEmitter.emit("add",e,this)}},t.Index.prototype.removeDocByRef=function(e){if(e&&this.documentStore.isDocStored()!==!1&&this.documentStore.hasDoc(e)){var t=this.documentStore.getDoc(e);this.removeDoc(t,!1)}},t.Index.prototype.removeDoc=function(e,n){if(e){var n=void 0===n?!0:n,i=e[this._ref];this.documentStore.hasDoc(i)&&(this.documentStore.removeDoc(i),this._fields.forEach(function(n){var o=this.pipeline.run(t.tokenizer(e[n]));o.forEach(function(e){this.index[n].removeToken(e,i)},this)},this),n&&this.eventEmitter.emit("remove",e,this))}},t.Index.prototype.updateDoc=function(e,t){var t=void 
0===t?!0:t;this.removeDocByRef(e[this._ref],!1),this.addDoc(e,!1),t&&this.eventEmitter.emit("update",e,this)},t.Index.prototype.idf=function(e,t){var n="@"+t+"/"+e;if(Object.prototype.hasOwnProperty.call(this._idfCache,n))return this._idfCache[n];var i=this.index[t].getDocFreq(e),o=1+Math.log(this.documentStore.length/(i+1));return this._idfCache[n]=o,o},t.Index.prototype.getFields=function(){return this._fields.slice()},t.Index.prototype.search=function(e,n){if(!e)return[];e="string"==typeof e?{any:e}:JSON.parse(JSON.stringify(e));var i=null;null!=n&&(i=JSON.stringify(n));for(var o=new t.Configuration(i,this.getFields()).get(),r={},s=Object.keys(e),u=0;u0&&t.push(e);for(var i in n)"docs"!==i&&"df"!==i&&this.expandToken(e+i,t,n[i]);return t},t.InvertedIndex.prototype.toJSON=function(){return{root:this.root}},t.Configuration=function(e,n){var e=e||"";if(void 0==n||null==n)throw new Error("fields should not be null");this.config={};var i;try{i=JSON.parse(e),this.buildUserConfig(i,n)}catch(o){t.utils.warn("user configuration parse failed, will use default configuration"),this.buildDefaultConfig(n)}},t.Configuration.prototype.buildDefaultConfig=function(e){this.reset(),e.forEach(function(e){this.config[e]={boost:1,bool:"OR",expand:!1}},this)},t.Configuration.prototype.buildUserConfig=function(e,n){var i="OR",o=!1;if(this.reset(),"bool"in e&&(i=e.bool||i),"expand"in e&&(o=e.expand||o),"fields"in e)for(var r in e.fields)if(n.indexOf(r)>-1){var s=e.fields[r],u=o;void 0!=s.expand&&(u=s.expand),this.config[r]={boost:s.boost||0===s.boost?s.boost:1,bool:s.bool||i,expand:u}}else t.utils.warn("field name in user configuration not found in index instance fields");else this.addAllFields2UserConfig(i,o,n)},t.Configuration.prototype.addAllFields2UserConfig=function(e,t,n){n.forEach(function(n){this.config[n]={boost:1,bool:e,expand:t}},this)},t.Configuration.prototype.get=function(){return this.config},t.Configuration.prototype.reset=function(){this.config={}},lunr.SortedSet=function(){this.length=0,this.elements=[]},lunr.SortedSet.load=function(e){var t=new this;return t.elements=e,t.length=e.length,t},lunr.SortedSet.prototype.add=function(){var e,t;for(e=0;e1;){if(r===e)return o;e>r&&(t=o),r>e&&(n=o),i=n-t,o=t+Math.floor(i/2),r=this.elements[o]}return r===e?o:-1},lunr.SortedSet.prototype.locationFor=function(e){for(var t=0,n=this.elements.length,i=n-t,o=t+Math.floor(i/2),r=this.elements[o];i>1;)e>r&&(t=o),r>e&&(n=o),i=n-t,o=t+Math.floor(i/2),r=this.elements[o];return r>e?o:e>r?o+1:void 0},lunr.SortedSet.prototype.intersect=function(e){for(var t=new lunr.SortedSet,n=0,i=0,o=this.length,r=e.length,s=this.elements,u=e.elements;;){if(n>o-1||i>r-1)break;s[n]!==u[i]?s[n]u[i]&&i++:(t.add(s[n]),n++,i++)}return t},lunr.SortedSet.prototype.clone=function(){var e=new lunr.SortedSet;return e.elements=this.toArray(),e.length=e.elements.length,e},lunr.SortedSet.prototype.union=function(e){var t,n,i;this.length>=e.length?(t=this,n=e):(t=e,n=this),i=t.clone();for(var o=0,r=n.toArray();o + + + + diff --git a/perf-guide/float-math/approx.html b/perf-guide/float-math/approx.html new file mode 100644 index 000000000..514354db8 --- /dev/null +++ b/perf-guide/float-math/approx.html @@ -0,0 +1,235 @@ + + + + + + Approximate functions - Rust SIMD Performance Guide + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + + + + +
+
+

Approximate functions

+ + +
+ + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/perf-guide/float-math/fma.html b/perf-guide/float-math/fma.html new file mode 100644 index 000000000..b49231c44 --- /dev/null +++ b/perf-guide/float-math/fma.html @@ -0,0 +1,233 @@ + + + + + + Fused multiply-accumulate - Rust SIMD Performance Guide + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + + + + +
+
+

Fused Multiply Add

+ + +
+ + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/perf-guide/float-math/fp.html b/perf-guide/float-math/fp.html new file mode 100644 index 000000000..5355b55b4 --- /dev/null +++ b/perf-guide/float-math/fp.html @@ -0,0 +1,230 @@ + + + + + + Floating-point Math - Rust SIMD Performance Guide + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + + + + +
+
+

Floating-point math

+

This chapter contains information pertaining to working with floating-point numbers.

+ +
+ + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/perf-guide/float-math/svml.html b/perf-guide/float-math/svml.html new file mode 100644 index 000000000..a6f791f96 --- /dev/null +++ b/perf-guide/float-math/svml.html @@ -0,0 +1,234 @@ + + + + + + Short-vector Math Library - Rust SIMD Performance Guide + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + + + + +
+
+

Short Vector Math Library

+ + +
+ + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/perf-guide/fonts/OPEN-SANS-LICENSE.txt b/perf-guide/fonts/OPEN-SANS-LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/perf-guide/fonts/OPEN-SANS-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/perf-guide/fonts/SOURCE-CODE-PRO-LICENSE.txt b/perf-guide/fonts/SOURCE-CODE-PRO-LICENSE.txt new file mode 100644 index 000000000..366206f54 --- /dev/null +++ b/perf-guide/fonts/SOURCE-CODE-PRO-LICENSE.txt @@ -0,0 +1,93 @@ +Copyright 2010, 2012 Adobe Systems Incorporated (http://www.adobe.com/), with Reserved Font Name 'Source'. All Rights Reserved. Source is a trademark of Adobe Systems Incorporated in the United States and/or other countries. + +This Font Software is licensed under the SIL Open Font License, Version 1.1. +This license is copied below, and is also available with a FAQ at: +http://scripts.sil.org/OFL + + +----------------------------------------------------------- +SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 +----------------------------------------------------------- + +PREAMBLE +The goals of the Open Font License (OFL) are to stimulate worldwide +development of collaborative font projects, to support the font creation +efforts of academic and linguistic communities, and to provide a free and +open framework in which fonts may be shared and improved in partnership +with others. + +The OFL allows the licensed fonts to be used, studied, modified and +redistributed freely as long as they are not sold by themselves. The +fonts, including any derivative works, can be bundled, embedded, +redistributed and/or sold with any software provided that any reserved +names are not used by derivative works. The fonts and derivatives, +however, cannot be released under any other type of license. The +requirement for fonts to remain under this license does not apply +to any document created using the fonts or their derivatives. + +DEFINITIONS +"Font Software" refers to the set of files released by the Copyright +Holder(s) under this license and clearly marked as such. This may +include source files, build scripts and documentation. + +"Reserved Font Name" refers to any names specified as such after the +copyright statement(s). + +"Original Version" refers to the collection of Font Software components as +distributed by the Copyright Holder(s). + +"Modified Version" refers to any derivative made by adding to, deleting, +or substituting -- in part or in whole -- any of the components of the +Original Version, by changing formats or by porting the Font Software to a +new environment. + +"Author" refers to any designer, engineer, programmer, technical +writer or other person who contributed to the Font Software. + +PERMISSION & CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Font Software, to use, study, copy, merge, embed, modify, +redistribute, and sell modified and unmodified copies of the Font +Software, subject to the following conditions: + +1) Neither the Font Software nor any of its individual components, +in Original or Modified Versions, may be sold by itself. + +2) Original or Modified Versions of the Font Software may be bundled, +redistributed and/or sold with any software, provided that each copy +contains the above copyright notice and this license. 
These can be +included either as stand-alone text files, human-readable headers or +in the appropriate machine-readable metadata fields within text or +binary files as long as those fields can be easily viewed by the user. + +3) No Modified Version of the Font Software may use the Reserved Font +Name(s) unless explicit written permission is granted by the corresponding +Copyright Holder. This restriction only applies to the primary font name as +presented to the users. + +4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font +Software shall not be used to promote, endorse or advertise any +Modified Version, except to acknowledge the contribution(s) of the +Copyright Holder(s) and the Author(s) or with their explicit written +permission. + +5) The Font Software, modified or unmodified, in part or in whole, +must be distributed entirely under this license, and must not be +distributed under any other license. The requirement for fonts to +remain under this license does not apply to any document created +using the Font Software. + +TERMINATION +This license becomes null and void if any of the above conditions are +not met. + +DISCLAIMER +THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE +COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL +DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM +OTHER DEALINGS IN THE FONT SOFTWARE. diff --git a/perf-guide/fonts/fonts.css b/perf-guide/fonts/fonts.css new file mode 100644 index 000000000..b2b63d027 --- /dev/null +++ b/perf-guide/fonts/fonts.css @@ -0,0 +1,101 @@ +/* Open Sans is licensed under the Apache License, Version 2.0. See http://www.apache.org/licenses/LICENSE-2.0 */ +/* Source Code Pro is under the Open Font License. 
See https://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&id=OFL */ + +/* open-sans-300 - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: normal; + font-weight: 300; + src: local('Open Sans Light'), local('OpenSans-Light'), + url('open-sans-v17-all-charsets-300.woff2') format('woff2'); +} + +/* open-sans-300italic - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: italic; + font-weight: 300; + src: local('Open Sans Light Italic'), local('OpenSans-LightItalic'), + url('open-sans-v17-all-charsets-300italic.woff2') format('woff2'); +} + +/* open-sans-regular - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: normal; + font-weight: 400; + src: local('Open Sans Regular'), local('OpenSans-Regular'), + url('open-sans-v17-all-charsets-regular.woff2') format('woff2'); +} + +/* open-sans-italic - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: italic; + font-weight: 400; + src: local('Open Sans Italic'), local('OpenSans-Italic'), + url('open-sans-v17-all-charsets-italic.woff2') format('woff2'); +} + +/* open-sans-600 - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: normal; + font-weight: 600; + src: local('Open Sans SemiBold'), local('OpenSans-SemiBold'), + url('open-sans-v17-all-charsets-600.woff2') format('woff2'); +} + +/* open-sans-600italic - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: italic; + font-weight: 600; + src: local('Open Sans SemiBold Italic'), local('OpenSans-SemiBoldItalic'), + url('open-sans-v17-all-charsets-600italic.woff2') format('woff2'); +} + +/* open-sans-700 - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: normal; + font-weight: 700; + src: local('Open Sans Bold'), local('OpenSans-Bold'), + url('open-sans-v17-all-charsets-700.woff2') format('woff2'); +} + +/* open-sans-700italic - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: italic; + font-weight: 700; + src: local('Open Sans Bold Italic'), local('OpenSans-BoldItalic'), + url('open-sans-v17-all-charsets-700italic.woff2') format('woff2'); +} + +/* open-sans-800 - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: normal; + font-weight: 800; + src: local('Open Sans ExtraBold'), local('OpenSans-ExtraBold'), + url('open-sans-v17-all-charsets-800.woff2') format('woff2'); +} + +/* open-sans-800italic - latin_vietnamese_latin-ext_greek-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Open Sans'; + font-style: italic; + font-weight: 800; + src: local('Open Sans ExtraBold Italic'), local('OpenSans-ExtraBoldItalic'), + url('open-sans-v17-all-charsets-800italic.woff2') format('woff2'); +} + +/* source-code-pro-500 - latin_vietnamese_latin-ext_greek_cyrillic-ext_cyrillic */ +@font-face { + font-family: 'Source Code Pro'; + font-style: normal; + font-weight: 500; + src: local('Source Code Pro Medium'), local('SourceCodePro-Medium'), + url('source-code-pro-v11-all-charsets-500.woff2') format('woff2'); +} diff --git 
a/perf-guide/fonts/open-sans-v17-all-charsets-300.woff2 b/perf-guide/fonts/open-sans-v17-all-charsets-300.woff2 new file mode 100644 index 000000000..9f51be370 Binary files /dev/null and b/perf-guide/fonts/open-sans-v17-all-charsets-300.woff2 differ diff --git a/perf-guide/fonts/open-sans-v17-all-charsets-300italic.woff2 b/perf-guide/fonts/open-sans-v17-all-charsets-300italic.woff2 new file mode 100644 index 000000000..2f5454484 Binary files /dev/null and b/perf-guide/fonts/open-sans-v17-all-charsets-300italic.woff2 differ diff --git a/perf-guide/fonts/open-sans-v17-all-charsets-600.woff2 b/perf-guide/fonts/open-sans-v17-all-charsets-600.woff2 new file mode 100644 index 000000000..f503d558d Binary files /dev/null and b/perf-guide/fonts/open-sans-v17-all-charsets-600.woff2 differ diff --git a/perf-guide/fonts/open-sans-v17-all-charsets-600italic.woff2 b/perf-guide/fonts/open-sans-v17-all-charsets-600italic.woff2 new file mode 100644 index 000000000..c99aabe80 Binary files /dev/null and b/perf-guide/fonts/open-sans-v17-all-charsets-600italic.woff2 differ diff --git a/perf-guide/fonts/open-sans-v17-all-charsets-700.woff2 b/perf-guide/fonts/open-sans-v17-all-charsets-700.woff2 new file mode 100644 index 000000000..421a1ab25 Binary files /dev/null and b/perf-guide/fonts/open-sans-v17-all-charsets-700.woff2 differ diff --git a/perf-guide/fonts/open-sans-v17-all-charsets-700italic.woff2 b/perf-guide/fonts/open-sans-v17-all-charsets-700italic.woff2 new file mode 100644 index 000000000..12ce3d20d Binary files /dev/null and b/perf-guide/fonts/open-sans-v17-all-charsets-700italic.woff2 differ diff --git a/perf-guide/fonts/open-sans-v17-all-charsets-800.woff2 b/perf-guide/fonts/open-sans-v17-all-charsets-800.woff2 new file mode 100644 index 000000000..c94a223b0 Binary files /dev/null and b/perf-guide/fonts/open-sans-v17-all-charsets-800.woff2 differ diff --git a/perf-guide/fonts/open-sans-v17-all-charsets-800italic.woff2 b/perf-guide/fonts/open-sans-v17-all-charsets-800italic.woff2 new file mode 100644 index 000000000..eed7d3c63 Binary files /dev/null and b/perf-guide/fonts/open-sans-v17-all-charsets-800italic.woff2 differ diff --git a/perf-guide/fonts/open-sans-v17-all-charsets-italic.woff2 b/perf-guide/fonts/open-sans-v17-all-charsets-italic.woff2 new file mode 100644 index 000000000..398b68a08 Binary files /dev/null and b/perf-guide/fonts/open-sans-v17-all-charsets-italic.woff2 differ diff --git a/perf-guide/fonts/open-sans-v17-all-charsets-regular.woff2 b/perf-guide/fonts/open-sans-v17-all-charsets-regular.woff2 new file mode 100644 index 000000000..8383e94c6 Binary files /dev/null and b/perf-guide/fonts/open-sans-v17-all-charsets-regular.woff2 differ diff --git a/perf-guide/fonts/source-code-pro-v11-all-charsets-500.woff2 b/perf-guide/fonts/source-code-pro-v11-all-charsets-500.woff2 new file mode 100644 index 000000000..722245682 Binary files /dev/null and b/perf-guide/fonts/source-code-pro-v11-all-charsets-500.woff2 differ diff --git a/perf-guide/highlight.css b/perf-guide/highlight.css new file mode 100644 index 000000000..ab8c49c68 --- /dev/null +++ b/perf-guide/highlight.css @@ -0,0 +1,79 @@ +/* Base16 Atelier Dune Light - Theme */ +/* by Bram de Haan (http://atelierbram.github.io/syntax-highlighting/atelier-schemes/dune) */ +/* Original Base16 color scheme by Chris Kempson (https://github.com/chriskempson/base16) */ + +/* Atelier-Dune Comment */ +.hljs-comment, +.hljs-quote { + color: #AAA; +} + +/* Atelier-Dune Red */ +.hljs-variable, +.hljs-template-variable, +.hljs-attribute, +.hljs-tag, 
+.hljs-name,
+.hljs-regexp,
+.hljs-link,
+.hljs-name,
+.hljs-selector-id,
+.hljs-selector-class {
+ color: #d73737;
+}
+
+/* Atelier-Dune Orange */
+.hljs-number,
+.hljs-meta,
+.hljs-built_in,
+.hljs-builtin-name,
+.hljs-literal,
+.hljs-type,
+.hljs-params {
+ color: #b65611;
+}
+
+/* Atelier-Dune Green */
+.hljs-string,
+.hljs-symbol,
+.hljs-bullet {
+ color: #60ac39;
+}
+
+/* Atelier-Dune Blue */
+.hljs-title,
+.hljs-section {
+ color: #6684e1;
+}
+
+/* Atelier-Dune Purple */
+.hljs-keyword,
+.hljs-selector-tag {
+ color: #b854d4;
+}
+
+.hljs {
+ display: block;
+ overflow-x: auto;
+ background: #f1f1f1;
+ color: #6e6b5e;
+ padding: 0.5em;
+}
+
+.hljs-emphasis {
+ font-style: italic;
+}
+
+.hljs-strong {
+ font-weight: bold;
+}
+
+.hljs-addition {
+ color: #22863a;
+ background-color: #f0fff4;
+}
+
+.hljs-deletion {
+ color: #b31d28;
+ background-color: #ffeef0;
+} diff --git a/perf-guide/highlight.js b/perf-guide/highlight.js new file mode 100644 index 000000000..7a8a229e3 --- /dev/null +++ b/perf-guide/highlight.js @@ -0,0 +1,2 @@ +/*! highlight.js v9.15.10 | BSD3 License | git.io/hljslicense */ +!function(e){var n="object"==typeof window&&window||"object"==typeof self&&self;"undefined"==typeof exports||exports.nodeType?n&&(n.hljs=e({}),"function"==typeof define&&define.amd&&define([],function(){return n.hljs})):e(exports)}(function(a){var f=[],u=Object.keys,N={},c={},n=/^(no-?highlight|plain|text)$/i,s=/\blang(?:uage)?-([\w-]+)\b/i,t=/((^(<[^>]+>|\t|)+|(?:\n)))/gm,r={case_insensitive:"cI",lexemes:"l",contains:"c",keywords:"k",subLanguage:"sL",className:"cN",begin:"b",beginKeywords:"bK",end:"e",endsWithParent:"eW",illegal:"i",excludeBegin:"eB",excludeEnd:"eE",returnBegin:"rB",returnEnd:"rE",relevance:"r",variants:"v",IDENT_RE:"IR",UNDERSCORE_IDENT_RE:"UIR",NUMBER_RE:"NR",C_NUMBER_RE:"CNR",BINARY_NUMBER_RE:"BNR",RE_STARTERS_RE:"RSR",BACKSLASH_ESCAPE:"BE",APOS_STRING_MODE:"ASM",QUOTE_STRING_MODE:"QSM",PHRASAL_WORDS_MODE:"PWM",C_LINE_COMMENT_MODE:"CLCM",C_BLOCK_COMMENT_MODE:"CBCM",HASH_COMMENT_MODE:"HCM",NUMBER_MODE:"NM",C_NUMBER_MODE:"CNM",BINARY_NUMBER_MODE:"BNM",CSS_NUMBER_MODE:"CSSNM",REGEXP_MODE:"RM",TITLE_MODE:"TM",UNDERSCORE_TITLE_MODE:"UTM",COMMENT:"C",beginRe:"bR",endRe:"eR",illegalRe:"iR",lexemesRe:"lR",terminators:"t",terminator_end:"tE"},b="</span>",h={classPrefix:"hljs-",tabReplace:null,useBR:!1,languages:void 0};function _(e){return e.replace(/&/g,"&amp;").replace(/</g,"&lt;").replace(/>/g,"&gt;")}function E(e){return e.nodeName.toLowerCase()}function v(e,n){var t=e&&e.exec(n);return t&&0===t.index}function l(e){return n.test(e)}function g(e){var n,t={},r=Array.prototype.slice.call(arguments,1);for(n in e)t[n]=e[n];return r.forEach(function(e){for(n in e)t[n]=e[n]}),t}function R(e){var a=[];return function e(n,t){for(var r=n.firstChild;r;r=r.nextSibling)3===r.nodeType?t+=r.nodeValue.length:1===r.nodeType&&(a.push({event:"start",offset:t,node:r}),t=e(r,t),E(r).match(/br|hr|img|input/)||a.push({event:"stop",offset:t,node:r}));return t}(e,0),a}function i(e){if(r&&!e.langApiRestored){for(var n in e.langApiRestored=!0,r)e[n]&&(e[r[n]]=e[n]);(e.c||[]).concat(e.v||[]).forEach(i)}}function m(o){function s(e){return e&&e.source||e}function c(e,n){return new RegExp(s(e),"m"+(o.cI?"i":"")+(n?"g":""))}!function n(t,e){if(!t.compiled){if(t.compiled=!0,t.k=t.k||t.bK,t.k){function r(t,e){o.cI&&(e=e.toLowerCase()),e.split(" ").forEach(function(e){var n=e.split("|");a[n[0]]=[t,n[1]?Number(n[1]):1]})}var a={};"string"==typeof 
t.k?r("keyword",t.k):u(t.k).forEach(function(e){r(e,t.k[e])}),t.k=a}t.lR=c(t.l||/\w+/,!0),e&&(t.bK&&(t.b="\\b("+t.bK.split(" ").join("|")+")\\b"),t.b||(t.b=/\B|\b/),t.bR=c(t.b),t.endSameAsBegin&&(t.e=t.b),t.e||t.eW||(t.e=/\B|\b/),t.e&&(t.eR=c(t.e)),t.tE=s(t.e)||"",t.eW&&e.tE&&(t.tE+=(t.e?"|":"")+e.tE)),t.i&&(t.iR=c(t.i)),null==t.r&&(t.r=1),t.c||(t.c=[]),t.c=Array.prototype.concat.apply([],t.c.map(function(e){return function(n){return n.v&&!n.cached_variants&&(n.cached_variants=n.v.map(function(e){return g(n,{v:null},e)})),n.cached_variants||n.eW&&[g(n)]||[n]}("self"===e?t:e)})),t.c.forEach(function(e){n(e,t)}),t.starts&&n(t.starts,e);var i=t.c.map(function(e){return e.bK?"\\.?(?:"+e.b+")\\.?":e.b}).concat([t.tE,t.i]).map(s).filter(Boolean);t.t=i.length?c(function(e,n){for(var t=/\[(?:[^\\\]]|\\.)*\]|\(\??|\\([1-9][0-9]*)|\\./,r=0,a="",i=0;i')+n+(t?"":b):n}function o(){E+=null!=l.sL?function(){var e="string"==typeof l.sL;if(e&&!N[l.sL])return _(g);var n=e?C(l.sL,g,!0,f[l.sL]):O(g,l.sL.length?l.sL:void 0);return 0")+'"');return g+=n,n.length||1}var s=B(e);if(!s)throw new Error('Unknown language: "'+e+'"');m(s);var a,l=t||s,f={},E="";for(a=l;a!==s;a=a.parent)a.cN&&(E=c(a.cN,"",!0)+E);var g="",R=0;try{for(var d,p,M=0;l.t.lastIndex=M,d=l.t.exec(n);)p=r(n.substring(M,d.index),d[0]),M=d.index+p;for(r(n.substr(M)),a=l;a.parent;a=a.parent)a.cN&&(E+=b);return{r:R,value:E,language:e,top:l}}catch(e){if(e.message&&-1!==e.message.indexOf("Illegal"))return{r:0,value:_(n)};throw e}}function O(t,e){e=e||h.languages||u(N);var r={r:0,value:_(t)},a=r;return e.filter(B).filter(M).forEach(function(e){var n=C(e,t,!1);n.language=e,n.r>a.r&&(a=n),n.r>r.r&&(a=r,r=n)}),a.language&&(r.second_best=a),r}function d(e){return h.tabReplace||h.useBR?e.replace(t,function(e,n){return h.useBR&&"\n"===e?"
":h.tabReplace?n.replace(/\t/g,h.tabReplace):""}):e}function o(e){var n,t,r,a,i,o=function(e){var n,t,r,a,i=e.className+" ";if(i+=e.parentNode?e.parentNode.className:"",t=s.exec(i))return B(t[1])?t[1]:"no-highlight";for(n=0,r=(i=i.split(/\s+/)).length;n/g,"\n"):n=e,i=n.textContent,r=o?C(o,i,!0):O(i),(t=R(n)).length&&((a=document.createElementNS("http://www.w3.org/1999/xhtml","div")).innerHTML=r.value,r.value=function(e,n,t){var r=0,a="",i=[];function o(){return e.length&&n.length?e[0].offset!==n[0].offset?e[0].offset"}function u(e){a+=""}function s(e){("start"===e.event?c:u)(e.node)}for(;e.length||n.length;){var l=o();if(a+=_(t.substring(r,l[0].offset)),r=l[0].offset,l===e){for(i.reverse().forEach(u);s(l.splice(0,1)[0]),(l=o())===e&&l.length&&l[0].offset===r;);i.reverse().forEach(c)}else"start"===l[0].event?i.push(l[0].node):i.pop(),s(l.splice(0,1)[0])}return a+_(t.substr(r))}(t,R(a),i)),r.value=d(r.value),e.innerHTML=r.value,e.className=function(e,n,t){var r=n?c[n]:t,a=[e.trim()];return e.match(/\bhljs\b/)||a.push("hljs"),-1===e.indexOf(r)&&a.push(r),a.join(" ").trim()}(e.className,o,r.language),e.result={language:r.language,re:r.r},r.second_best&&(e.second_best={language:r.second_best.language,re:r.second_best.r}))}function p(){if(!p.called){p.called=!0;var e=document.querySelectorAll("pre code");f.forEach.call(e,o)}}function B(e){return e=(e||"").toLowerCase(),N[e]||N[c[e]]}function M(e){var n=B(e);return n&&!n.disableAutodetect}return a.highlight=C,a.highlightAuto=O,a.fixMarkup=d,a.highlightBlock=o,a.configure=function(e){h=g(h,e)},a.initHighlighting=p,a.initHighlightingOnLoad=function(){addEventListener("DOMContentLoaded",p,!1),addEventListener("load",p,!1)},a.registerLanguage=function(n,e){var t=N[n]=e(a);i(t),t.aliases&&t.aliases.forEach(function(e){c[e]=n})},a.listLanguages=function(){return u(N)},a.getLanguage=B,a.autoDetection=M,a.inherit=g,a.IR=a.IDENT_RE="[a-zA-Z]\\w*",a.UIR=a.UNDERSCORE_IDENT_RE="[a-zA-Z_]\\w*",a.NR=a.NUMBER_RE="\\b\\d+(\\.\\d+)?",a.CNR=a.C_NUMBER_RE="(-?)(\\b0[xX][a-fA-F0-9]+|(\\b\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?)",a.BNR=a.BINARY_NUMBER_RE="\\b(0b[01]+)",a.RSR=a.RE_STARTERS_RE="!|!=|!==|%|%=|&|&&|&=|\\*|\\*=|\\+|\\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\\?|\\[|\\{|\\(|\\^|\\^=|\\||\\|=|\\|\\||~",a.BE=a.BACKSLASH_ESCAPE={b:"\\\\[\\s\\S]",r:0},a.ASM=a.APOS_STRING_MODE={cN:"string",b:"'",e:"'",i:"\\n",c:[a.BE]},a.QSM=a.QUOTE_STRING_MODE={cN:"string",b:'"',e:'"',i:"\\n",c:[a.BE]},a.PWM=a.PHRASAL_WORDS_MODE={b:/\b(a|an|the|are|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such|will|you|your|they|like|more)\b/},a.C=a.COMMENT=function(e,n,t){var r=a.inherit({cN:"comment",b:e,e:n,c:[]},t||{});return r.c.push(a.PWM),r.c.push({cN:"doctag",b:"(?:TODO|FIXME|NOTE|BUG|XXX):",r:0}),r},a.CLCM=a.C_LINE_COMMENT_MODE=a.C("//","$"),a.CBCM=a.C_BLOCK_COMMENT_MODE=a.C("/\\*","\\*/"),a.HCM=a.HASH_COMMENT_MODE=a.C("#","$"),a.NM=a.NUMBER_MODE={cN:"number",b:a.NR,r:0},a.CNM=a.C_NUMBER_MODE={cN:"number",b:a.CNR,r:0},a.BNM=a.BINARY_NUMBER_MODE={cN:"number",b:a.BNR,r:0},a.CSSNM=a.CSS_NUMBER_MODE={cN:"number",b:a.NR+"(%|em|ex|ch|rem|vw|vh|vmin|vmax|cm|mm|in|pt|pc|px|deg|grad|rad|turn|s|ms|Hz|kHz|dpi|dpcm|dppx)?",r:0},a.RM=a.REGEXP_MODE={cN:"regexp",b:/\//,e:/\/[gimuy]*/,i:/\n/,c:[a.BE,{b:/\[/,e:/\]/,r:0,c:[a.BE]}]},a.TM=a.TITLE_MODE={cN:"title",b:a.IR,r:0},a.UTM=a.UNDERSCORE_TITLE_MODE={cN:"title",b:a.UIR,r:0},a.METHOD_GUARD={b:"\\.\\s*"+a.UIR,r:0},a});hljs.registerLanguage("properties",function(r){var t="[ 
\\t\\f]*",e="("+t+"[:=]"+t+"|[ \\t\\f]+)",s="([^\\\\\\W:= \\t\\f\\n]|\\\\.)+",n="([^\\\\:= \\t\\f\\n]|\\\\.)+",a={e:e,r:0,starts:{cN:"string",e:/$/,r:0,c:[{b:"\\\\\\n"}]}};return{cI:!0,i:/\S/,c:[r.C("^\\s*[!#]","$"),{b:s+e,rB:!0,c:[{cN:"attr",b:s,endsParent:!0,r:0}],starts:a},{b:n+e,rB:!0,r:0,c:[{cN:"meta",b:n,endsParent:!0,r:0}],starts:a},{cN:"attr",r:0,b:n+t+"$"}]}});hljs.registerLanguage("json",function(e){var i={literal:"true false null"},n=[e.QSM,e.CNM],r={e:",",eW:!0,eE:!0,c:n,k:i},t={b:"{",e:"}",c:[{cN:"attr",b:/"/,e:/"/,c:[e.BE],i:"\\n"},e.inherit(r,{b:/:/})],i:"\\S"},c={b:"\\[",e:"\\]",c:[e.inherit(r)],i:"\\S"};return n.splice(n.length,0,t,c),{c:n,k:i,i:"\\S"}});hljs.registerLanguage("javascript",function(e){var r="[A-Za-z$_][0-9A-Za-z$_]*",t={keyword:"in of if for while finally var new function do return void else break catch instanceof with throw case default try this switch continue typeof delete let yield const export super debugger as async await static import from as",literal:"true false null undefined NaN Infinity",built_in:"eval isFinite isNaN parseFloat parseInt decodeURI decodeURIComponent encodeURI encodeURIComponent escape unescape Object Function Boolean Error EvalError InternalError RangeError ReferenceError StopIteration SyntaxError TypeError URIError Number Math Date String RegExp Array Float32Array Float64Array Int16Array Int32Array Int8Array Uint16Array Uint32Array Uint8Array Uint8ClampedArray ArrayBuffer DataView JSON Intl arguments require module console window document Symbol Set Map WeakSet WeakMap Proxy Reflect Promise"},a={cN:"number",v:[{b:"\\b(0[bB][01]+)"},{b:"\\b(0[oO][0-7]+)"},{b:e.CNR}],r:0},s={cN:"subst",b:"\\$\\{",e:"\\}",k:t,c:[]},c={b:"html`",e:"",starts:{e:"`",rE:!1,c:[e.BE,s],sL:"xml"}},n={b:"css`",e:"",starts:{e:"`",rE:!1,c:[e.BE,s],sL:"css"}},o={cN:"string",b:"`",e:"`",c:[e.BE,s]};s.c=[e.ASM,e.QSM,c,n,o,a,e.RM];var i=s.c.concat([e.CBCM,e.CLCM]);return{aliases:["js","jsx"],k:t,c:[{cN:"meta",r:10,b:/^\s*['"]use (strict|asm)['"]/},{cN:"meta",b:/^#!/,e:/$/},e.ASM,e.QSM,c,n,o,e.CLCM,e.CBCM,a,{b:/[{,]\s*/,r:0,c:[{b:r+"\\s*:",rB:!0,r:0,c:[{cN:"attr",b:r,r:0}]}]},{b:"("+e.RSR+"|\\b(case|return|throw)\\b)\\s*",k:"return throw case",c:[e.CLCM,e.CBCM,e.RM,{cN:"function",b:"(\\(.*?\\)|"+r+")\\s*=>",rB:!0,e:"\\s*=>",c:[{cN:"params",v:[{b:r},{b:/\(\s*\)/},{b:/\(/,e:/\)/,eB:!0,eE:!0,k:t,c:i}]}]},{cN:"",b:/\s/,e:/\s*/,skip:!0},{b://,sL:"xml",c:[{b:/<[A-Za-z0-9\\._:-]+\s*\/>/,skip:!0},{b:/<[A-Za-z0-9\\._:-]+/,e:/(\/[A-Za-z0-9\\._:-]+|[A-Za-z0-9\\._:-]+\/)>/,skip:!0,c:[{b:/<[A-Za-z0-9\\._:-]+\s*\/>/,skip:!0},"self"]}]}],r:0},{cN:"function",bK:"function",e:/\{/,eE:!0,c:[e.inherit(e.TM,{b:r}),{cN:"params",b:/\(/,e:/\)/,eB:!0,eE:!0,c:i}],i:/\[|%/},{b:/\$[(.]/},e.METHOD_GUARD,{cN:"class",bK:"class",e:/[{;=]/,eE:!0,i:/[:"\[\]]/,c:[{bK:"extends"},e.UTM]},{bK:"constructor get set",e:/\{/,eE:!0}],i:/#(?!!)/}});hljs.registerLanguage("xml",function(s){var 
e={eW:!0,i:/`]+/}]}]}]};return{aliases:["html","xhtml","rss","atom","xjb","xsd","xsl","plist","wsf"],cI:!0,c:[{cN:"meta",b:"",r:10,c:[{b:"\\[",e:"\\]"}]},s.C("\x3c!--","--\x3e",{r:10}),{b:"<\\!\\[CDATA\\[",e:"\\]\\]>",r:10},{cN:"meta",b:/<\?xml/,e:/\?>/,r:10},{b:/<\?(php)?/,e:/\?>/,sL:"php",c:[{b:"/\\*",e:"\\*/",skip:!0},{b:'b"',e:'"',skip:!0},{b:"b'",e:"'",skip:!0},s.inherit(s.ASM,{i:null,cN:null,c:null,skip:!0}),s.inherit(s.QSM,{i:null,cN:null,c:null,skip:!0})]},{cN:"tag",b:"|$)",e:">",k:{name:"style"},c:[e],starts:{e:"",rE:!0,sL:["css","xml"]}},{cN:"tag",b:"|$)",e:">",k:{name:"script"},c:[e],starts:{e:"<\/script>",rE:!0,sL:["actionscript","javascript","handlebars","xml","vbscript"]}},{cN:"tag",b:"",c:[{cN:"name",b:/[^\/><\s]+/,r:0},e]}]}});hljs.registerLanguage("markdown",function(e){return{aliases:["md","mkdown","mkd"],c:[{cN:"section",v:[{b:"^#{1,6}",e:"$"},{b:"^.+?\\n[=-]{2,}$"}]},{b:"<",e:">",sL:"xml",r:0},{cN:"bullet",b:"^\\s*([*+-]|(\\d+\\.))\\s+"},{cN:"strong",b:"[*_]{2}.+?[*_]{2}"},{cN:"emphasis",v:[{b:"\\*.+?\\*"},{b:"_.+?_",r:0}]},{cN:"quote",b:"^>\\s+",e:"$"},{cN:"code",v:[{b:"^```w*s*$",e:"^```s*$"},{b:"`.+?`"},{b:"^( {4}|\t)",e:"$",r:0}]},{b:"^[-\\*]{3,}",e:"$"},{b:"\\[.+?\\][\\(\\[].*?[\\)\\]]",rB:!0,c:[{cN:"string",b:"\\[",e:"\\]",eB:!0,rE:!0,r:0},{cN:"link",b:"\\]\\(",e:"\\)",eB:!0,eE:!0},{cN:"symbol",b:"\\]\\[",e:"\\]",eB:!0,eE:!0}],r:10},{b:/^\[[^\n]+\]:/,rB:!0,c:[{cN:"symbol",b:/\[/,e:/\]/,eB:!0,eE:!0},{cN:"link",b:/:\s*/,e:/$/,eB:!0}]}]}});hljs.registerLanguage("php",function(e){var c={b:"\\$+[a-zA-Z_-ÿ][a-zA-Z0-9_-ÿ]*"},i={cN:"meta",b:/<\?(php)?|\?>/},t={cN:"string",c:[e.BE,i],v:[{b:'b"',e:'"'},{b:"b'",e:"'"},e.inherit(e.ASM,{i:null}),e.inherit(e.QSM,{i:null})]},a={v:[e.BNM,e.CNM]};return{aliases:["php","php3","php4","php5","php6","php7"],cI:!0,k:"and include_once list abstract global private echo interface as static endswitch array null if endwhile or const for endforeach self var while isset public protected exit foreach throw elseif include __FILE__ empty require_once do xor return parent clone use __CLASS__ __LINE__ else break print eval new catch __METHOD__ case exception default die require __FUNCTION__ enddeclare final try switch continue endfor endif declare unset true false trait goto instanceof insteadof __DIR__ __NAMESPACE__ yield finally",c:[e.HCM,e.C("//","$",{c:[i]}),e.C("/\\*","\\*/",{c:[{cN:"doctag",b:"@[A-Za-z]+"}]}),e.C("__halt_compiler.+?;",!1,{eW:!0,k:"__halt_compiler",l:e.UIR}),{cN:"string",b:/<<<['"]?\w+['"]?$/,e:/^\w+;?$/,c:[e.BE,{cN:"subst",v:[{b:/\$\w+/},{b:/\{\$/,e:/\}/}]}]},i,{cN:"keyword",b:/\$this\b/},c,{b:/(::|->)+[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*/},{cN:"function",bK:"function",e:/[;{]/,eE:!0,i:"\\$|\\[|%",c:[e.UTM,{cN:"params",b:"\\(",e:"\\)",c:["self",c,e.CBCM,t,a]}]},{cN:"class",bK:"class interface",e:"{",eE:!0,i:/[:\(\$"]/,c:[{bK:"extends implements"},e.UTM]},{bK:"namespace",e:";",i:/[\.']/,c:[e.UTM]},{bK:"use",e:";",c:[e.UTM]},{b:"=>"},t,a]}});hljs.registerLanguage("armasm",function(s){return{cI:!0,aliases:["arm"],l:"\\.?"+s.IR,k:{meta:".2byte .4byte .align .ascii .asciz .balign .byte .code .data .else .end .endif .endm .endr .equ .err .exitm .extern .global .hword .if .ifdef .ifndef .include .irp .long .macro .rept .req .section .set .skip .space .text .word .arm .thumb .code16 .code32 .force_thumb .thumb_func .ltorg ALIAS ALIGN ARM AREA ASSERT ATTR CN CODE CODE16 CODE32 COMMON CP DATA DCB DCD DCDU DCDO DCFD DCFDU DCI DCQ DCQU DCW DCWU DN ELIF ELSE END ENDFUNC ENDIF ENDP ENTRY EQU EXPORT EXPORTAS EXTERN FIELD FILL FUNCTION 
GBLA GBLL GBLS GET GLOBAL IF IMPORT INCBIN INCLUDE INFO KEEP LCLA LCLL LCLS LTORG MACRO MAP MEND MEXIT NOFP OPT PRESERVE8 PROC QN READONLY RELOC REQUIRE REQUIRE8 RLIST FN ROUT SETA SETL SETS SN SPACE SUBT THUMB THUMBX TTL WHILE WEND ",built_in:"r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 pc lr sp ip sl sb fp a1 a2 a3 a4 v1 v2 v3 v4 v5 v6 v7 v8 f0 f1 f2 f3 f4 f5 f6 f7 p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15 c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15 q0 q1 q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 cpsr_c cpsr_x cpsr_s cpsr_f cpsr_cx cpsr_cxs cpsr_xs cpsr_xsf cpsr_sf cpsr_cxsf spsr_c spsr_x spsr_s spsr_f spsr_cx spsr_cxs spsr_xs spsr_xsf spsr_sf spsr_cxsf s0 s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15 s16 s17 s18 s19 s20 s21 s22 s23 s24 s25 s26 s27 s28 s29 s30 s31 d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 {PC} {VAR} {TRUE} {FALSE} {OPT} {CONFIG} {ENDIAN} {CODESIZE} {CPU} {FPU} {ARCHITECTURE} {PCSTOREOFFSET} {ARMASM_VERSION} {INTER} {ROPI} {RWPI} {SWST} {NOSWST} . @"},c:[{cN:"keyword",b:"\\b(adc|(qd?|sh?|u[qh]?)?add(8|16)?|usada?8|(q|sh?|u[qh]?)?(as|sa)x|and|adrl?|sbc|rs[bc]|asr|b[lx]?|blx|bxj|cbn?z|tb[bh]|bic|bfc|bfi|[su]bfx|bkpt|cdp2?|clz|clrex|cmp|cmn|cpsi[ed]|cps|setend|dbg|dmb|dsb|eor|isb|it[te]{0,3}|lsl|lsr|ror|rrx|ldm(([id][ab])|f[ds])?|ldr((s|ex)?[bhd])?|movt?|mvn|mra|mar|mul|[us]mull|smul[bwt][bt]|smu[as]d|smmul|smmla|mla|umlaal|smlal?([wbt][bt]|d)|mls|smlsl?[ds]|smc|svc|sev|mia([bt]{2}|ph)?|mrr?c2?|mcrr2?|mrs|msr|orr|orn|pkh(tb|bt)|rbit|rev(16|sh)?|sel|[su]sat(16)?|nop|pop|push|rfe([id][ab])?|stm([id][ab])?|str(ex)?[bhd]?|(qd?)?sub|(sh?|q|u[qh]?)?sub(8|16)|[su]xt(a?h|a?b(16)?)|srs([id][ab])?|swpb?|swi|smi|tst|teq|wfe|wfi|yield)(eq|ne|cs|cc|mi|pl|vs|vc|hi|ls|ge|lt|gt|le|al|hs|lo)?[sptrx]?",e:"\\s"},s.C("[;@]","$",{r:0}),s.CBCM,s.QSM,{cN:"string",b:"'",e:"[^\\\\]'",r:0},{cN:"title",b:"\\|",e:"\\|",i:"\\n",r:0},{cN:"number",v:[{b:"[#$=]?0x[0-9a-f]+"},{b:"[#$=]?0b[01]+"},{b:"[#$=]\\d+"},{b:"\\b\\d+"}],r:0},{cN:"symbol",v:[{b:"^[a-z_\\.\\$][a-z0-9_\\.\\$]+"},{b:"^\\s*[a-z_\\.\\$][a-z0-9_\\.\\$]+:"},{b:"[=#]\\w+"}],r:0}]}});hljs.registerLanguage("x86asm",function(s){return{cI:!0,l:"[.%]?"+s.IR,k:{keyword:"lock rep repe repz repne repnz xaquire xrelease bnd nobnd aaa aad aam aas adc add and arpl bb0_reset bb1_reset bound bsf bsr bswap bt btc btr bts call cbw cdq cdqe clc cld cli clts cmc cmp cmpsb cmpsd cmpsq cmpsw cmpxchg cmpxchg486 cmpxchg8b cmpxchg16b cpuid cpu_read cpu_write cqo cwd cwde daa das dec div dmint emms enter equ f2xm1 fabs fadd faddp fbld fbstp fchs fclex fcmovb fcmovbe fcmove fcmovnb fcmovnbe fcmovne fcmovnu fcmovu fcom fcomi fcomip fcomp fcompp fcos fdecstp fdisi fdiv fdivp fdivr fdivrp femms feni ffree ffreep fiadd ficom ficomp fidiv fidivr fild fimul fincstp finit fist fistp fisttp fisub fisubr fld fld1 fldcw fldenv fldl2e fldl2t fldlg2 fldln2 fldpi fldz fmul fmulp fnclex fndisi fneni fninit fnop fnsave fnstcw fnstenv fnstsw fpatan fprem fprem1 fptan frndint frstor fsave fscale fsetpm fsin fsincos fsqrt fst fstcw fstenv fstp fstsw fsub fsubp fsubr fsubrp ftst fucom fucomi fucomip fucomp fucompp fxam fxch fxtract fyl2x fyl2xp1 hlt ibts icebp idiv imul in inc incbin insb insd insw int int01 int1 int03 int3 into invd invpcid invlpg invlpga iret iretd iretq iretw jcxz jecxz jrcxz jmp jmpe lahf lar lds lea leave les lfence lfs lgdt lgs lidt lldt lmsw loadall loadall286 lodsb lodsd lodsq lodsw loop loope loopne loopnz loopz lsl lss ltr mfence monitor mov 
movd movq movsb movsd movsq movsw movsx movsxd movzx mul mwait neg nop not or out outsb outsd outsw packssdw packsswb packuswb paddb paddd paddsb paddsiw paddsw paddusb paddusw paddw pand pandn pause paveb pavgusb pcmpeqb pcmpeqd pcmpeqw pcmpgtb pcmpgtd pcmpgtw pdistib pf2id pfacc pfadd pfcmpeq pfcmpge pfcmpgt pfmax pfmin pfmul pfrcp pfrcpit1 pfrcpit2 pfrsqit1 pfrsqrt pfsub pfsubr pi2fd pmachriw pmaddwd pmagw pmulhriw pmulhrwa pmulhrwc pmulhw pmullw pmvgezb pmvlzb pmvnzb pmvzb pop popa popad popaw popf popfd popfq popfw por prefetch prefetchw pslld psllq psllw psrad psraw psrld psrlq psrlw psubb psubd psubsb psubsiw psubsw psubusb psubusw psubw punpckhbw punpckhdq punpckhwd punpcklbw punpckldq punpcklwd push pusha pushad pushaw pushf pushfd pushfq pushfw pxor rcl rcr rdshr rdmsr rdpmc rdtsc rdtscp ret retf retn rol ror rdm rsdc rsldt rsm rsts sahf sal salc sar sbb scasb scasd scasq scasw sfence sgdt shl shld shr shrd sidt sldt skinit smi smint smintold smsw stc std sti stosb stosd stosq stosw str sub svdc svldt svts swapgs syscall sysenter sysexit sysret test ud0 ud1 ud2b ud2 ud2a umov verr verw fwait wbinvd wrshr wrmsr xadd xbts xchg xlatb xlat xor cmove cmovz cmovne cmovnz cmova cmovnbe cmovae cmovnb cmovb cmovnae cmovbe cmovna cmovg cmovnle cmovge cmovnl cmovl cmovnge cmovle cmovng cmovc cmovnc cmovo cmovno cmovs cmovns cmovp cmovpe cmovnp cmovpo je jz jne jnz ja jnbe jae jnb jb jnae jbe jna jg jnle jge jnl jl jnge jle jng jc jnc jo jno js jns jpo jnp jpe jp sete setz setne setnz seta setnbe setae setnb setnc setb setnae setcset setbe setna setg setnle setge setnl setl setnge setle setng sets setns seto setno setpe setp setpo setnp addps addss andnps andps cmpeqps cmpeqss cmpleps cmpless cmpltps cmpltss cmpneqps cmpneqss cmpnleps cmpnless cmpnltps cmpnltss cmpordps cmpordss cmpunordps cmpunordss cmpps cmpss comiss cvtpi2ps cvtps2pi cvtsi2ss cvtss2si cvttps2pi cvttss2si divps divss ldmxcsr maxps maxss minps minss movaps movhps movlhps movlps movhlps movmskps movntps movss movups mulps mulss orps rcpps rcpss rsqrtps rsqrtss shufps sqrtps sqrtss stmxcsr subps subss ucomiss unpckhps unpcklps xorps fxrstor fxrstor64 fxsave fxsave64 xgetbv xsetbv xsave xsave64 xsaveopt xsaveopt64 xrstor xrstor64 prefetchnta prefetcht0 prefetcht1 prefetcht2 maskmovq movntq pavgb pavgw pextrw pinsrw pmaxsw pmaxub pminsw pminub pmovmskb pmulhuw psadbw pshufw pf2iw pfnacc pfpnacc pi2fw pswapd maskmovdqu clflush movntdq movnti movntpd movdqa movdqu movdq2q movq2dq paddq pmuludq pshufd pshufhw pshuflw pslldq psrldq psubq punpckhqdq punpcklqdq addpd addsd andnpd andpd cmpeqpd cmpeqsd cmplepd cmplesd cmpltpd cmpltsd cmpneqpd cmpneqsd cmpnlepd cmpnlesd cmpnltpd cmpnltsd cmpordpd cmpordsd cmpunordpd cmpunordsd cmppd comisd cvtdq2pd cvtdq2ps cvtpd2dq cvtpd2pi cvtpd2ps cvtpi2pd cvtps2dq cvtps2pd cvtsd2si cvtsd2ss cvtsi2sd cvtss2sd cvttpd2pi cvttpd2dq cvttps2dq cvttsd2si divpd divsd maxpd maxsd minpd minsd movapd movhpd movlpd movmskpd movupd mulpd mulsd orpd shufpd sqrtpd sqrtsd subpd subsd ucomisd unpckhpd unpcklpd xorpd addsubpd addsubps haddpd haddps hsubpd hsubps lddqu movddup movshdup movsldup clgi stgi vmcall vmclear vmfunc vmlaunch vmload vmmcall vmptrld vmptrst vmread vmresume vmrun vmsave vmwrite vmxoff vmxon invept invvpid pabsb pabsw pabsd palignr phaddw phaddd phaddsw phsubw phsubd phsubsw pmaddubsw pmulhrsw pshufb psignb psignw psignd extrq insertq movntsd movntss lzcnt blendpd blendps blendvpd blendvps dppd dpps extractps insertps movntdqa mpsadbw packusdw pblendvb pblendw pcmpeqq pextrb pextrd pextrq 
phminposuw pinsrb pinsrd pinsrq pmaxsb pmaxsd pmaxud pmaxuw pminsb pminsd pminud pminuw pmovsxbw pmovsxbd pmovsxbq pmovsxwd pmovsxwq pmovsxdq pmovzxbw pmovzxbd pmovzxbq pmovzxwd pmovzxwq pmovzxdq pmuldq pmulld ptest roundpd roundps roundsd roundss crc32 pcmpestri pcmpestrm pcmpistri pcmpistrm pcmpgtq popcnt getsec pfrcpv pfrsqrtv movbe aesenc aesenclast aesdec aesdeclast aesimc aeskeygenassist vaesenc vaesenclast vaesdec vaesdeclast vaesimc vaeskeygenassist vaddpd vaddps vaddsd vaddss vaddsubpd vaddsubps vandpd vandps vandnpd vandnps vblendpd vblendps vblendvpd vblendvps vbroadcastss vbroadcastsd vbroadcastf128 vcmpeq_ospd vcmpeqpd vcmplt_ospd vcmpltpd vcmple_ospd vcmplepd vcmpunord_qpd vcmpunordpd vcmpneq_uqpd vcmpneqpd vcmpnlt_uspd vcmpnltpd vcmpnle_uspd vcmpnlepd vcmpord_qpd vcmpordpd vcmpeq_uqpd vcmpnge_uspd vcmpngepd vcmpngt_uspd vcmpngtpd vcmpfalse_oqpd vcmpfalsepd vcmpneq_oqpd vcmpge_ospd vcmpgepd vcmpgt_ospd vcmpgtpd vcmptrue_uqpd vcmptruepd vcmplt_oqpd vcmple_oqpd vcmpunord_spd vcmpneq_uspd vcmpnlt_uqpd vcmpnle_uqpd vcmpord_spd vcmpeq_uspd vcmpnge_uqpd vcmpngt_uqpd vcmpfalse_ospd vcmpneq_ospd vcmpge_oqpd vcmpgt_oqpd vcmptrue_uspd vcmppd vcmpeq_osps vcmpeqps vcmplt_osps vcmpltps vcmple_osps vcmpleps vcmpunord_qps vcmpunordps vcmpneq_uqps vcmpneqps vcmpnlt_usps vcmpnltps vcmpnle_usps vcmpnleps vcmpord_qps vcmpordps vcmpeq_uqps vcmpnge_usps vcmpngeps vcmpngt_usps vcmpngtps vcmpfalse_oqps vcmpfalseps vcmpneq_oqps vcmpge_osps vcmpgeps vcmpgt_osps vcmpgtps vcmptrue_uqps vcmptrueps vcmplt_oqps vcmple_oqps vcmpunord_sps vcmpneq_usps vcmpnlt_uqps vcmpnle_uqps vcmpord_sps vcmpeq_usps vcmpnge_uqps vcmpngt_uqps vcmpfalse_osps vcmpneq_osps vcmpge_oqps vcmpgt_oqps vcmptrue_usps vcmpps vcmpeq_ossd vcmpeqsd vcmplt_ossd vcmpltsd vcmple_ossd vcmplesd vcmpunord_qsd vcmpunordsd vcmpneq_uqsd vcmpneqsd vcmpnlt_ussd vcmpnltsd vcmpnle_ussd vcmpnlesd vcmpord_qsd vcmpordsd vcmpeq_uqsd vcmpnge_ussd vcmpngesd vcmpngt_ussd vcmpngtsd vcmpfalse_oqsd vcmpfalsesd vcmpneq_oqsd vcmpge_ossd vcmpgesd vcmpgt_ossd vcmpgtsd vcmptrue_uqsd vcmptruesd vcmplt_oqsd vcmple_oqsd vcmpunord_ssd vcmpneq_ussd vcmpnlt_uqsd vcmpnle_uqsd vcmpord_ssd vcmpeq_ussd vcmpnge_uqsd vcmpngt_uqsd vcmpfalse_ossd vcmpneq_ossd vcmpge_oqsd vcmpgt_oqsd vcmptrue_ussd vcmpsd vcmpeq_osss vcmpeqss vcmplt_osss vcmpltss vcmple_osss vcmpless vcmpunord_qss vcmpunordss vcmpneq_uqss vcmpneqss vcmpnlt_usss vcmpnltss vcmpnle_usss vcmpnless vcmpord_qss vcmpordss vcmpeq_uqss vcmpnge_usss vcmpngess vcmpngt_usss vcmpngtss vcmpfalse_oqss vcmpfalsess vcmpneq_oqss vcmpge_osss vcmpgess vcmpgt_osss vcmpgtss vcmptrue_uqss vcmptruess vcmplt_oqss vcmple_oqss vcmpunord_sss vcmpneq_usss vcmpnlt_uqss vcmpnle_uqss vcmpord_sss vcmpeq_usss vcmpnge_uqss vcmpngt_uqss vcmpfalse_osss vcmpneq_osss vcmpge_oqss vcmpgt_oqss vcmptrue_usss vcmpss vcomisd vcomiss vcvtdq2pd vcvtdq2ps vcvtpd2dq vcvtpd2ps vcvtps2dq vcvtps2pd vcvtsd2si vcvtsd2ss vcvtsi2sd vcvtsi2ss vcvtss2sd vcvtss2si vcvttpd2dq vcvttps2dq vcvttsd2si vcvttss2si vdivpd vdivps vdivsd vdivss vdppd vdpps vextractf128 vextractps vhaddpd vhaddps vhsubpd vhsubps vinsertf128 vinsertps vlddqu vldqqu vldmxcsr vmaskmovdqu vmaskmovps vmaskmovpd vmaxpd vmaxps vmaxsd vmaxss vminpd vminps vminsd vminss vmovapd vmovaps vmovd vmovq vmovddup vmovdqa vmovqqa vmovdqu vmovqqu vmovhlps vmovhpd vmovhps vmovlhps vmovlpd vmovlps vmovmskpd vmovmskps vmovntdq vmovntqq vmovntdqa vmovntpd vmovntps vmovsd vmovshdup vmovsldup vmovss vmovupd vmovups vmpsadbw vmulpd vmulps vmulsd vmulss vorpd vorps vpabsb vpabsw vpabsd vpacksswb vpackssdw vpackuswb vpackusdw 
vpaddb vpaddw vpaddd vpaddq vpaddsb vpaddsw vpaddusb vpaddusw vpalignr vpand vpandn vpavgb vpavgw vpblendvb vpblendw vpcmpestri vpcmpestrm vpcmpistri vpcmpistrm vpcmpeqb vpcmpeqw vpcmpeqd vpcmpeqq vpcmpgtb vpcmpgtw vpcmpgtd vpcmpgtq vpermilpd vpermilps vperm2f128 vpextrb vpextrw vpextrd vpextrq vphaddw vphaddd vphaddsw vphminposuw vphsubw vphsubd vphsubsw vpinsrb vpinsrw vpinsrd vpinsrq vpmaddwd vpmaddubsw vpmaxsb vpmaxsw vpmaxsd vpmaxub vpmaxuw vpmaxud vpminsb vpminsw vpminsd vpminub vpminuw vpminud vpmovmskb vpmovsxbw vpmovsxbd vpmovsxbq vpmovsxwd vpmovsxwq vpmovsxdq vpmovzxbw vpmovzxbd vpmovzxbq vpmovzxwd vpmovzxwq vpmovzxdq vpmulhuw vpmulhrsw vpmulhw vpmullw vpmulld vpmuludq vpmuldq vpor vpsadbw vpshufb vpshufd vpshufhw vpshuflw vpsignb vpsignw vpsignd vpslldq vpsrldq vpsllw vpslld vpsllq vpsraw vpsrad vpsrlw vpsrld vpsrlq vptest vpsubb vpsubw vpsubd vpsubq vpsubsb vpsubsw vpsubusb vpsubusw vpunpckhbw vpunpckhwd vpunpckhdq vpunpckhqdq vpunpcklbw vpunpcklwd vpunpckldq vpunpcklqdq vpxor vrcpps vrcpss vrsqrtps vrsqrtss vroundpd vroundps vroundsd vroundss vshufpd vshufps vsqrtpd vsqrtps vsqrtsd vsqrtss vstmxcsr vsubpd vsubps vsubsd vsubss vtestps vtestpd vucomisd vucomiss vunpckhpd vunpckhps vunpcklpd vunpcklps vxorpd vxorps vzeroall vzeroupper pclmullqlqdq pclmulhqlqdq pclmullqhqdq pclmulhqhqdq pclmulqdq vpclmullqlqdq vpclmulhqlqdq vpclmullqhqdq vpclmulhqhqdq vpclmulqdq vfmadd132ps vfmadd132pd vfmadd312ps vfmadd312pd vfmadd213ps vfmadd213pd vfmadd123ps vfmadd123pd vfmadd231ps vfmadd231pd vfmadd321ps vfmadd321pd vfmaddsub132ps vfmaddsub132pd vfmaddsub312ps vfmaddsub312pd vfmaddsub213ps vfmaddsub213pd vfmaddsub123ps vfmaddsub123pd vfmaddsub231ps vfmaddsub231pd vfmaddsub321ps vfmaddsub321pd vfmsub132ps vfmsub132pd vfmsub312ps vfmsub312pd vfmsub213ps vfmsub213pd vfmsub123ps vfmsub123pd vfmsub231ps vfmsub231pd vfmsub321ps vfmsub321pd vfmsubadd132ps vfmsubadd132pd vfmsubadd312ps vfmsubadd312pd vfmsubadd213ps vfmsubadd213pd vfmsubadd123ps vfmsubadd123pd vfmsubadd231ps vfmsubadd231pd vfmsubadd321ps vfmsubadd321pd vfnmadd132ps vfnmadd132pd vfnmadd312ps vfnmadd312pd vfnmadd213ps vfnmadd213pd vfnmadd123ps vfnmadd123pd vfnmadd231ps vfnmadd231pd vfnmadd321ps vfnmadd321pd vfnmsub132ps vfnmsub132pd vfnmsub312ps vfnmsub312pd vfnmsub213ps vfnmsub213pd vfnmsub123ps vfnmsub123pd vfnmsub231ps vfnmsub231pd vfnmsub321ps vfnmsub321pd vfmadd132ss vfmadd132sd vfmadd312ss vfmadd312sd vfmadd213ss vfmadd213sd vfmadd123ss vfmadd123sd vfmadd231ss vfmadd231sd vfmadd321ss vfmadd321sd vfmsub132ss vfmsub132sd vfmsub312ss vfmsub312sd vfmsub213ss vfmsub213sd vfmsub123ss vfmsub123sd vfmsub231ss vfmsub231sd vfmsub321ss vfmsub321sd vfnmadd132ss vfnmadd132sd vfnmadd312ss vfnmadd312sd vfnmadd213ss vfnmadd213sd vfnmadd123ss vfnmadd123sd vfnmadd231ss vfnmadd231sd vfnmadd321ss vfnmadd321sd vfnmsub132ss vfnmsub132sd vfnmsub312ss vfnmsub312sd vfnmsub213ss vfnmsub213sd vfnmsub123ss vfnmsub123sd vfnmsub231ss vfnmsub231sd vfnmsub321ss vfnmsub321sd rdfsbase rdgsbase rdrand wrfsbase wrgsbase vcvtph2ps vcvtps2ph adcx adox rdseed clac stac xstore xcryptecb xcryptcbc xcryptctr xcryptcfb xcryptofb montmul xsha1 xsha256 llwpcb slwpcb lwpval lwpins vfmaddpd vfmaddps vfmaddsd vfmaddss vfmaddsubpd vfmaddsubps vfmsubaddpd vfmsubaddps vfmsubpd vfmsubps vfmsubsd vfmsubss vfnmaddpd vfnmaddps vfnmaddsd vfnmaddss vfnmsubpd vfnmsubps vfnmsubsd vfnmsubss vfrczpd vfrczps vfrczsd vfrczss vpcmov vpcomb vpcomd vpcomq vpcomub vpcomud vpcomuq vpcomuw vpcomw vphaddbd vphaddbq vphaddbw vphadddq vphaddubd vphaddubq vphaddubw vphaddudq vphadduwd vphadduwq vphaddwd 
vphaddwq vphsubbw vphsubdq vphsubwd vpmacsdd vpmacsdqh vpmacsdql vpmacssdd vpmacssdqh vpmacssdql vpmacsswd vpmacssww vpmacswd vpmacsww vpmadcsswd vpmadcswd vpperm vprotb vprotd vprotq vprotw vpshab vpshad vpshaq vpshaw vpshlb vpshld vpshlq vpshlw vbroadcasti128 vpblendd vpbroadcastb vpbroadcastw vpbroadcastd vpbroadcastq vpermd vpermpd vpermps vpermq vperm2i128 vextracti128 vinserti128 vpmaskmovd vpmaskmovq vpsllvd vpsllvq vpsravd vpsrlvd vpsrlvq vgatherdpd vgatherqpd vgatherdps vgatherqps vpgatherdd vpgatherqd vpgatherdq vpgatherqq xabort xbegin xend xtest andn bextr blci blcic blsi blsic blcfill blsfill blcmsk blsmsk blsr blcs bzhi mulx pdep pext rorx sarx shlx shrx tzcnt tzmsk t1mskc valignd valignq vblendmpd vblendmps vbroadcastf32x4 vbroadcastf64x4 vbroadcasti32x4 vbroadcasti64x4 vcompresspd vcompressps vcvtpd2udq vcvtps2udq vcvtsd2usi vcvtss2usi vcvttpd2udq vcvttps2udq vcvttsd2usi vcvttss2usi vcvtudq2pd vcvtudq2ps vcvtusi2sd vcvtusi2ss vexpandpd vexpandps vextractf32x4 vextractf64x4 vextracti32x4 vextracti64x4 vfixupimmpd vfixupimmps vfixupimmsd vfixupimmss vgetexppd vgetexpps vgetexpsd vgetexpss vgetmantpd vgetmantps vgetmantsd vgetmantss vinsertf32x4 vinsertf64x4 vinserti32x4 vinserti64x4 vmovdqa32 vmovdqa64 vmovdqu32 vmovdqu64 vpabsq vpandd vpandnd vpandnq vpandq vpblendmd vpblendmq vpcmpltd vpcmpled vpcmpneqd vpcmpnltd vpcmpnled vpcmpd vpcmpltq vpcmpleq vpcmpneqq vpcmpnltq vpcmpnleq vpcmpq vpcmpequd vpcmpltud vpcmpleud vpcmpnequd vpcmpnltud vpcmpnleud vpcmpud vpcmpequq vpcmpltuq vpcmpleuq vpcmpnequq vpcmpnltuq vpcmpnleuq vpcmpuq vpcompressd vpcompressq vpermi2d vpermi2pd vpermi2ps vpermi2q vpermt2d vpermt2pd vpermt2ps vpermt2q vpexpandd vpexpandq vpmaxsq vpmaxuq vpminsq vpminuq vpmovdb vpmovdw vpmovqb vpmovqd vpmovqw vpmovsdb vpmovsdw vpmovsqb vpmovsqd vpmovsqw vpmovusdb vpmovusdw vpmovusqb vpmovusqd vpmovusqw vpord vporq vprold vprolq vprolvd vprolvq vprord vprorq vprorvd vprorvq vpscatterdd vpscatterdq vpscatterqd vpscatterqq vpsraq vpsravq vpternlogd vpternlogq vptestmd vptestmq vptestnmd vptestnmq vpxord vpxorq vrcp14pd vrcp14ps vrcp14sd vrcp14ss vrndscalepd vrndscaleps vrndscalesd vrndscaless vrsqrt14pd vrsqrt14ps vrsqrt14sd vrsqrt14ss vscalefpd vscalefps vscalefsd vscalefss vscatterdpd vscatterdps vscatterqpd vscatterqps vshuff32x4 vshuff64x2 vshufi32x4 vshufi64x2 kandnw kandw kmovw knotw kortestw korw kshiftlw kshiftrw kunpckbw kxnorw kxorw vpbroadcastmb2q vpbroadcastmw2d vpconflictd vpconflictq vplzcntd vplzcntq vexp2pd vexp2ps vrcp28pd vrcp28ps vrcp28sd vrcp28ss vrsqrt28pd vrsqrt28ps vrsqrt28sd vrsqrt28ss vgatherpf0dpd vgatherpf0dps vgatherpf0qpd vgatherpf0qps vgatherpf1dpd vgatherpf1dps vgatherpf1qpd vgatherpf1qps vscatterpf0dpd vscatterpf0dps vscatterpf0qpd vscatterpf0qps vscatterpf1dpd vscatterpf1dps vscatterpf1qpd vscatterpf1qps prefetchwt1 bndmk bndcl bndcu bndcn bndmov bndldx bndstx sha1rnds4 sha1nexte sha1msg1 sha1msg2 sha256rnds2 sha256msg1 sha256msg2 hint_nop0 hint_nop1 hint_nop2 hint_nop3 hint_nop4 hint_nop5 hint_nop6 hint_nop7 hint_nop8 hint_nop9 hint_nop10 hint_nop11 hint_nop12 hint_nop13 hint_nop14 hint_nop15 hint_nop16 hint_nop17 hint_nop18 hint_nop19 hint_nop20 hint_nop21 hint_nop22 hint_nop23 hint_nop24 hint_nop25 hint_nop26 hint_nop27 hint_nop28 hint_nop29 hint_nop30 hint_nop31 hint_nop32 hint_nop33 hint_nop34 hint_nop35 hint_nop36 hint_nop37 hint_nop38 hint_nop39 hint_nop40 hint_nop41 hint_nop42 hint_nop43 hint_nop44 hint_nop45 hint_nop46 hint_nop47 hint_nop48 hint_nop49 hint_nop50 hint_nop51 hint_nop52 hint_nop53 hint_nop54 hint_nop55 hint_nop56 
hint_nop57 hint_nop58 hint_nop59 hint_nop60 hint_nop61 hint_nop62 hint_nop63",built_in:"ip eip rip al ah bl bh cl ch dl dh sil dil bpl spl r8b r9b r10b r11b r12b r13b r14b r15b ax bx cx dx si di bp sp r8w r9w r10w r11w r12w r13w r14w r15w eax ebx ecx edx esi edi ebp esp eip r8d r9d r10d r11d r12d r13d r14d r15d rax rbx rcx rdx rsi rdi rbp rsp r8 r9 r10 r11 r12 r13 r14 r15 cs ds es fs gs ss st st0 st1 st2 st3 st4 st5 st6 st7 mm0 mm1 mm2 mm3 mm4 mm5 mm6 mm7 xmm0 xmm1 xmm2 xmm3 xmm4 xmm5 xmm6 xmm7 xmm8 xmm9 xmm10 xmm11 xmm12 xmm13 xmm14 xmm15 xmm16 xmm17 xmm18 xmm19 xmm20 xmm21 xmm22 xmm23 xmm24 xmm25 xmm26 xmm27 xmm28 xmm29 xmm30 xmm31 ymm0 ymm1 ymm2 ymm3 ymm4 ymm5 ymm6 ymm7 ymm8 ymm9 ymm10 ymm11 ymm12 ymm13 ymm14 ymm15 ymm16 ymm17 ymm18 ymm19 ymm20 ymm21 ymm22 ymm23 ymm24 ymm25 ymm26 ymm27 ymm28 ymm29 ymm30 ymm31 zmm0 zmm1 zmm2 zmm3 zmm4 zmm5 zmm6 zmm7 zmm8 zmm9 zmm10 zmm11 zmm12 zmm13 zmm14 zmm15 zmm16 zmm17 zmm18 zmm19 zmm20 zmm21 zmm22 zmm23 zmm24 zmm25 zmm26 zmm27 zmm28 zmm29 zmm30 zmm31 k0 k1 k2 k3 k4 k5 k6 k7 bnd0 bnd1 bnd2 bnd3 cr0 cr1 cr2 cr3 cr4 cr8 dr0 dr1 dr2 dr3 dr8 tr3 tr4 tr5 tr6 tr7 r0 r1 r2 r3 r4 r5 r6 r7 r0b r1b r2b r3b r4b r5b r6b r7b r0w r1w r2w r3w r4w r5w r6w r7w r0d r1d r2d r3d r4d r5d r6d r7d r0h r1h r2h r3h r0l r1l r2l r3l r4l r5l r6l r7l r8l r9l r10l r11l r12l r13l r14l r15l db dw dd dq dt ddq do dy dz resb resw resd resq rest resdq reso resy resz incbin equ times byte word dword qword nosplit rel abs seg wrt strict near far a32 ptr",meta:"%define %xdefine %+ %undef %defstr %deftok %assign %strcat %strlen %substr %rotate %elif %else %endif %if %ifmacro %ifctx %ifidn %ifidni %ifid %ifnum %ifstr %iftoken %ifempty %ifenv %error %warning %fatal %rep %endrep %include %push %pop %repl %pathsearch %depend %use %arg %stacksize %local %line %comment %endcomment .nolist __FILE__ __LINE__ __SECT__ __BITS__ __OUTPUT_FORMAT__ __DATE__ __TIME__ __DATE_NUM__ __TIME_NUM__ __UTC_DATE__ __UTC_TIME__ __UTC_DATE_NUM__ __UTC_TIME_NUM__ __PASS__ struc endstruc istruc at iend align alignb sectalign daz nodaz up down zero default option assume public bits use16 use32 use64 default section segment absolute extern global common cpu float __utf16__ __utf16le__ __utf16be__ __utf32__ __utf32le__ __utf32be__ __float8__ __float16__ __float32__ __float64__ __float80m__ __float80e__ __float128l__ __float128h__ __Infinity__ __QNaN__ __SNaN__ Inf NaN QNaN SNaN float8 float16 float32 float64 float80m float80e float128l float128h __FLOAT_DAZ__ __FLOAT_ROUND__ __FLOAT__"},c:[s.C(";","$",{r:0}),{cN:"number",v:[{b:"\\b(?:([0-9][0-9_]*)?\\.[0-9_]*(?:[eE][+-]?[0-9_]+)?|(0[Xx])?[0-9][0-9_]*\\.?[0-9_]*(?:[pP](?:[+-]?[0-9_]+)?)?)\\b",r:0},{b:"\\$[0-9][0-9A-Fa-f]*",r:0},{b:"\\b(?:[0-9A-Fa-f][0-9A-Fa-f_]*[Hh]|[0-9][0-9_]*[DdTt]?|[0-7][0-7_]*[QqOo]|[0-1][0-1_]*[BbYy])\\b"},{b:"\\b(?:0[Xx][0-9A-Fa-f_]+|0[DdTt][0-9_]+|0[QqOo][0-7_]+|0[BbYy][0-1_]+)\\b"}]},s.QSM,{cN:"string",v:[{b:"'",e:"[^\\\\]'"},{b:"`",e:"[^\\\\]`"}],r:0},{cN:"symbol",v:[{b:"^\\s*[A-Za-z._?][A-Za-z0-9_$#@~.?]*(:|\\s+label)"},{b:"^\\s*%%[A-Za-z0-9_$#@~.?]*:"}],r:0},{cN:"subst",b:"%[0-9]+",r:0},{cN:"subst",b:"%!S+",r:0},{cN:"meta",b:/^\s*\.[\w_-]+/}]}});hljs.registerLanguage("ruby",function(e){var b="[a-zA-Z_]\\w*[!?=]?|[-+~]\\@|<<|>>|=~|===?|<=>|[<>]=?|\\*\\*|[-/+%^&*~`|]|\\[\\]=?",r={keyword:"and then defined module in return redo if BEGIN retry end for self when next until do begin unless END rescue else break undef not super class case require yield alias while ensure elsif or include attr_reader attr_writer attr_accessor",literal:"true false 
nil"},c={cN:"doctag",b:"@[A-Za-z]+"},a={b:"#<",e:">"},s=[e.C("#","$",{c:[c]}),e.C("^\\=begin","^\\=end",{c:[c],r:10}),e.C("^__END__","\\n$")],n={cN:"subst",b:"#\\{",e:"}",k:r},t={cN:"string",c:[e.BE,n],v:[{b:/'/,e:/'/},{b:/"/,e:/"/},{b:/`/,e:/`/},{b:"%[qQwWx]?\\(",e:"\\)"},{b:"%[qQwWx]?\\[",e:"\\]"},{b:"%[qQwWx]?{",e:"}"},{b:"%[qQwWx]?<",e:">"},{b:"%[qQwWx]?/",e:"/"},{b:"%[qQwWx]?%",e:"%"},{b:"%[qQwWx]?-",e:"-"},{b:"%[qQwWx]?\\|",e:"\\|"},{b:/\B\?(\\\d{1,3}|\\x[A-Fa-f0-9]{1,2}|\\u[A-Fa-f0-9]{4}|\\?\S)\b/},{b:/<<[-~]?'?(\w+)(?:.|\n)*?\n\s*\1\b/,rB:!0,c:[{b:/<<[-~]?'?/},{b:/\w+/,endSameAsBegin:!0,c:[e.BE,n]}]}]},i={cN:"params",b:"\\(",e:"\\)",endsParent:!0,k:r},d=[t,a,{cN:"class",bK:"class module",e:"$|;",i:/=/,c:[e.inherit(e.TM,{b:"[A-Za-z_]\\w*(::\\w+)*(\\?|\\!)?"}),{b:"<\\s*",c:[{b:"("+e.IR+"::)?"+e.IR}]}].concat(s)},{cN:"function",bK:"def",e:"$|;",c:[e.inherit(e.TM,{b:b}),i].concat(s)},{b:e.IR+"::"},{cN:"symbol",b:e.UIR+"(\\!|\\?)?:",r:0},{cN:"symbol",b:":(?!\\s)",c:[t,{b:b}],r:0},{cN:"number",b:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b",r:0},{b:"(\\$\\W)|((\\$|\\@\\@?)(\\w+))"},{cN:"params",b:/\|/,e:/\|/,k:r},{b:"("+e.RSR+"|unless)\\s*",k:"unless",c:[a,{cN:"regexp",c:[e.BE,n],i:/\n/,v:[{b:"/",e:"/[a-z]*"},{b:"%r{",e:"}[a-z]*"},{b:"%r\\(",e:"\\)[a-z]*"},{b:"%r!",e:"![a-z]*"},{b:"%r\\[",e:"\\][a-z]*"}]}].concat(s),r:0}].concat(s);n.c=d;var l=[{b:/^\s*=>/,starts:{e:"$",c:i.c=d}},{cN:"meta",b:"^([>?]>|[\\w#]+\\(\\w+\\):\\d+:\\d+>|(\\w+-)?\\d+\\.\\d+\\.\\d(p\\d+)?[^>]+>)",starts:{e:"$",c:d}}];return{aliases:["rb","gemspec","podspec","thor","irb"],k:r,i:/\/\*/,c:s.concat(l).concat(d)}});hljs.registerLanguage("yaml",function(e){var b="true false yes no null",a="^[ \\-]*",r="[a-zA-Z_][\\w\\-]*",t={cN:"attr",v:[{b:a+r+":"},{b:a+'"'+r+'":'},{b:a+"'"+r+"':"}]},c={cN:"string",r:0,v:[{b:/'/,e:/'/},{b:/"/,e:/"/},{b:/\S+/}],c:[e.BE,{cN:"template-variable",v:[{b:"{{",e:"}}"},{b:"%{",e:"}"}]}]};return{cI:!0,aliases:["yml","YAML","yaml"],c:[t,{cN:"meta",b:"^---s*$",r:10},{cN:"string",b:"[\\|>] *$",rE:!0,c:c.c,e:t.v[0].b},{b:"<%[%=-]?",e:"[%-]?%>",sL:"ruby",eB:!0,eE:!0,r:0},{cN:"type",b:"!"+e.UIR},{cN:"type",b:"!!"+e.UIR},{cN:"meta",b:"&"+e.UIR+"$"},{cN:"meta",b:"\\*"+e.UIR+"$"},{cN:"bullet",b:"^ *-",r:0},e.HCM,{bK:b,k:{literal:b}},e.CNM,c]}});hljs.registerLanguage("julia",function(e){var r={keyword:"in isa where baremodule begin break catch ccall const continue do else elseif end export false finally for function global if import importall let local macro module quote return true try using while type immutable abstract bitstype typealias ",literal:"true false ARGS C_NULL DevNull ENDIAN_BOM ENV I Inf Inf16 Inf32 Inf64 InsertionSort JULIA_HOME LOAD_PATH MergeSort NaN NaN16 NaN32 NaN64 PROGRAM_FILE QuickSort RoundDown RoundFromZero RoundNearest RoundNearestTiesAway RoundNearestTiesUp RoundToZero RoundUp STDERR STDIN STDOUT VERSION catalan e|0 eu|0 eulergamma golden im nothing pi γ π φ ",built_in:"ANY AbstractArray AbstractChannel AbstractFloat AbstractMatrix AbstractRNG AbstractSerializer AbstractSet AbstractSparseArray AbstractSparseMatrix AbstractSparseVector AbstractString AbstractUnitRange AbstractVecOrMat AbstractVector Any ArgumentError Array AssertionError Associative Base64DecodePipe Base64EncodePipe Bidiagonal BigFloat BigInt BitArray BitMatrix BitVector Bool BoundsError BufferStream CachingPool CapturedException CartesianIndex CartesianRange Cchar Cdouble Cfloat Channel Char Cint Cintmax_t Clong Clonglong ClusterManager Cmd CodeInfo Colon Complex Complex128 
Complex32 Complex64 CompositeException Condition ConjArray ConjMatrix ConjVector Cptrdiff_t Cshort Csize_t Cssize_t Cstring Cuchar Cuint Cuintmax_t Culong Culonglong Cushort Cwchar_t Cwstring DataType Date DateFormat DateTime DenseArray DenseMatrix DenseVecOrMat DenseVector Diagonal Dict DimensionMismatch Dims DirectIndexString Display DivideError DomainError EOFError EachLine Enum Enumerate ErrorException Exception ExponentialBackOff Expr Factorization FileMonitor Float16 Float32 Float64 Function Future GlobalRef GotoNode HTML Hermitian IO IOBuffer IOContext IOStream IPAddr IPv4 IPv6 IndexCartesian IndexLinear IndexStyle InexactError InitError Int Int128 Int16 Int32 Int64 Int8 IntSet Integer InterruptException InvalidStateException Irrational KeyError LabelNode LinSpace LineNumberNode LoadError LowerTriangular MIME Matrix MersenneTwister Method MethodError MethodTable Module NTuple NewvarNode NullException Nullable Number ObjectIdDict OrdinalRange OutOfMemoryError OverflowError Pair ParseError PartialQuickSort PermutedDimsArray Pipe PollingFileWatcher ProcessExitedException Ptr QuoteNode RandomDevice Range RangeIndex Rational RawFD ReadOnlyMemoryError Real ReentrantLock Ref Regex RegexMatch RemoteChannel RemoteException RevString RoundingMode RowVector SSAValue SegmentationFault SerializationState Set SharedArray SharedMatrix SharedVector Signed SimpleVector Slot SlotNumber SparseMatrixCSC SparseVector StackFrame StackOverflowError StackTrace StepRange StepRangeLen StridedArray StridedMatrix StridedVecOrMat StridedVector String SubArray SubString SymTridiagonal Symbol Symmetric SystemError TCPSocket Task Text TextDisplay Timer Tridiagonal Tuple Type TypeError TypeMapEntry TypeMapLevel TypeName TypeVar TypedSlot UDPSocket UInt UInt128 UInt16 UInt32 UInt64 UInt8 UndefRefError UndefVarError UnicodeError UniformScaling Union UnionAll UnitRange Unsigned UpperTriangular Val Vararg VecElement VecOrMat Vector VersionNumber Void WeakKeyDict WeakRef WorkerConfig WorkerPool "},t="[A-Za-z_\\u00A1-\\uFFFF][A-Za-z_0-9\\u00A1-\\uFFFF]*",a={l:t,k:r,i:/<\//},n={cN:"subst",b:/\$\(/,e:/\)/,k:r},o={cN:"variable",b:"\\$"+t},i={cN:"string",c:[e.BE,n,o],v:[{b:/\w*"""/,e:/"""\w*/,r:10},{b:/\w*"/,e:/"\w*/}]},l={cN:"string",c:[e.BE,n,o],b:"`",e:"`"},c={cN:"meta",b:"@"+t};return a.c=[{cN:"number",b:/(\b0x[\d_]*(\.[\d_]*)?|0x\.\d[\d_]*)p[-+]?\d+|\b0[box][a-fA-F0-9][a-fA-F0-9_]*|(\b\d[\d_]*(\.[\d_]*)?|\.\d[\d_]*)([eEfF][-+]?\d+)?/,r:0},{cN:"string",b:/'(.|\\[xXuU][a-zA-Z0-9]+)'/},i,l,c,{cN:"comment",v:[{b:"#=",e:"=#",r:10},{b:"#",e:"$"}]},e.HCM,{cN:"keyword",b:"\\b(((abstract|primitive)\\s+)type|(mutable\\s+)?struct)\\b"},{b:/<:/}],n.c=a.c,a});hljs.registerLanguage("scala",function(e){var t={cN:"subst",v:[{b:"\\$[A-Za-z0-9_]+"},{b:"\\${",e:"}"}]},a={cN:"string",v:[{b:'"',e:'"',i:"\\n",c:[e.BE]},{b:'"""',e:'"""',r:10},{b:'[a-z]+"',e:'"',i:"\\n",c:[e.BE,t]},{cN:"string",b:'[a-z]+"""',e:'"""',c:[t],r:10}]},r={cN:"type",b:"\\b[A-Z][A-Za-z0-9_]*",r:0},c={cN:"title",b:/[^0-9\n\t "'(),.`{}\[\]:;][^\n\t "'(),.`{}\[\]:;]+|[^0-9\n\t "'(),.`{}\[\]:;=]/,r:0},i={cN:"class",bK:"class object trait type",e:/[:={\[\n;]/,eE:!0,c:[{bK:"extends with",r:10},{b:/\[/,e:/\]/,eB:!0,eE:!0,r:0,c:[r]},{cN:"params",b:/\(/,e:/\)/,eB:!0,eE:!0,r:0,c:[r]},c]},s={cN:"function",bK:"def",e:/[:={\[(\n;]/,eE:!0,c:[c]};return{k:{literal:"true false null",keyword:"type yield lazy override def with val var sealed abstract private trait object if forSome for while throw finally protected extends import final return else break new catch super class case 
package default try this match continue throws implicit"},c:[e.CLCM,e.CBCM,a,{cN:"symbol",b:"'\\w[\\w\\d_]*(?!')"},r,s,i,e.CNM,{cN:"meta",b:"@[A-Za-z]+"}]}});hljs.registerLanguage("nginx",function(e){var r={cN:"variable",v:[{b:/\$\d+/},{b:/\$\{/,e:/}/},{b:"[\\$\\@]"+e.UIR}]},b={eW:!0,l:"[a-z/_]+",k:{literal:"on off yes no true false none blocked debug info notice warn error crit select break last permanent redirect kqueue rtsig epoll poll /dev/poll"},r:0,i:"=>",c:[e.HCM,{cN:"string",c:[e.BE,r],v:[{b:/"/,e:/"/},{b:/'/,e:/'/}]},{b:"([a-z]+):/",e:"\\s",eW:!0,eE:!0,c:[r]},{cN:"regexp",c:[e.BE,r],v:[{b:"\\s\\^",e:"\\s|{|;",rE:!0},{b:"~\\*?\\s+",e:"\\s|{|;",rE:!0},{b:"\\*(\\.[a-z\\-]+)+"},{b:"([a-z\\-]+\\.)+\\*"}]},{cN:"number",b:"\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(:\\d{1,5})?\\b"},{cN:"number",b:"\\b\\d+[kKmMgGdshdwy]*\\b",r:0},r]};return{aliases:["nginxconf"],c:[e.HCM,{b:e.UIR+"\\s+{",rB:!0,e:"{",c:[{cN:"section",b:e.UIR}],r:0},{b:e.UIR+"\\s",e:";|{",rB:!0,c:[{cN:"attribute",b:e.UIR,starts:b}],r:0}],i:"[^\\s\\}]"}});hljs.registerLanguage("sql",function(e){var t=e.C("--","$");return{cI:!0,i:/[<>{}*]/,c:[{bK:"begin end start commit rollback savepoint lock alter create drop rename call delete do handler insert load replace select truncate update set show pragma grant merge describe use explain help declare prepare execute deallocate release unlock purge reset change stop analyze cache flush optimize repair kill install uninstall checksum restore check backup revoke comment values with",e:/;/,eW:!0,l:/[\w\.]+/,k:{keyword:"as abort abs absolute acc acce accep accept access accessed accessible account acos action activate add addtime admin administer advanced advise aes_decrypt aes_encrypt after agent aggregate ali alia alias all allocate allow alter always analyze ancillary and anti any anydata anydataset anyschema anytype apply archive archived archivelog are as asc ascii asin assembly assertion associate asynchronous at atan atn2 attr attri attrib attribu attribut attribute attributes audit authenticated authentication authid authors auto autoallocate autodblink autoextend automatic availability avg backup badfile basicfile before begin beginning benchmark between bfile bfile_base big bigfile bin binary_double binary_float binlog bit_and bit_count bit_length bit_or bit_xor bitmap blob_base block blocksize body both bound bucket buffer_cache buffer_pool build bulk by byte byteordermark bytes cache caching call calling cancel capacity cascade cascaded case cast catalog category ceil ceiling chain change changed char_base char_length character_length characters characterset charindex charset charsetform charsetid check checksum checksum_agg child choose chr chunk class cleanup clear client clob clob_base clone close cluster_id cluster_probability cluster_set clustering coalesce coercibility col collate collation collect colu colum column column_value columns columns_updated comment commit compact compatibility compiled complete composite_limit compound compress compute concat concat_ws concurrent confirm conn connec connect connect_by_iscycle connect_by_isleaf connect_by_root connect_time connection consider consistent constant constraint constraints constructor container content contents context contributors controlfile conv convert convert_tz corr corr_k corr_s corresponding corruption cos cost count count_big counted covar_pop covar_samp cpu_per_call cpu_per_session crc32 create creation critical cross cube cume_dist curdate current current_date current_time current_timestamp 
current_user cursor curtime customdatum cycle data database databases datafile datafiles datalength date_add date_cache date_format date_sub dateadd datediff datefromparts datename datepart datetime2fromparts day day_to_second dayname dayofmonth dayofweek dayofyear days db_role_change dbtimezone ddl deallocate declare decode decompose decrement decrypt deduplicate def defa defau defaul default defaults deferred defi defin define degrees delayed delegate delete delete_all delimited demand dense_rank depth dequeue des_decrypt des_encrypt des_key_file desc descr descri describ describe descriptor deterministic diagnostics difference dimension direct_load directory disable disable_all disallow disassociate discardfile disconnect diskgroup distinct distinctrow distribute distributed div do document domain dotnet double downgrade drop dumpfile duplicate duration each edition editionable editions element ellipsis else elsif elt empty enable enable_all enclosed encode encoding encrypt end end-exec endian enforced engine engines enqueue enterprise entityescaping eomonth error errors escaped evalname evaluate event eventdata events except exception exceptions exchange exclude excluding execu execut execute exempt exists exit exp expire explain explode export export_set extended extent external external_1 external_2 externally extract failed failed_login_attempts failover failure far fast feature_set feature_value fetch field fields file file_name_convert filesystem_like_logging final finish first first_value fixed flash_cache flashback floor flush following follows for forall force foreign form forma format found found_rows freelist freelists freepools fresh from from_base64 from_days ftp full function general generated get get_format get_lock getdate getutcdate global global_name globally go goto grant grants greatest group group_concat group_id grouping grouping_id groups gtid_subtract guarantee guard handler hash hashkeys having hea head headi headin heading heap help hex hierarchy high high_priority hosts hour hours http id ident_current ident_incr ident_seed identified identity idle_time if ifnull ignore iif ilike ilm immediate import in include including increment index indexes indexing indextype indicator indices inet6_aton inet6_ntoa inet_aton inet_ntoa infile initial initialized initially initrans inmemory inner innodb input insert install instance instantiable instr interface interleaved intersect into invalidate invisible is is_free_lock is_ipv4 is_ipv4_compat is_not is_not_null is_used_lock isdate isnull isolation iterate java join json json_exists keep keep_duplicates key keys kill language large last last_day last_insert_id last_value lateral lax lcase lead leading least leaves left len lenght length less level levels library like like2 like4 likec limit lines link list listagg little ln load load_file lob lobs local localtime localtimestamp locate locator lock locked log log10 log2 logfile logfiles logging logical logical_reads_per_call logoff logon logs long loop low low_priority lower lpad lrtrim ltrim main make_set makedate maketime managed management manual map mapping mask master master_pos_wait match matched materialized max maxextents maximize maxinstances maxlen maxlogfiles maxloghistory maxlogmembers maxsize maxtrans md5 measures median medium member memcompress memory merge microsecond mid migration min minextents minimum mining minus minute minutes minvalue missing mod mode model modification modify module monitoring month months mount move movement multiset mutex name 
name_const names nan national native natural nav nchar nclob nested never new newline next nextval no no_write_to_binlog noarchivelog noaudit nobadfile nocheck nocompress nocopy nocycle nodelay nodiscardfile noentityescaping noguarantee nokeep nologfile nomapping nomaxvalue nominimize nominvalue nomonitoring none noneditionable nonschema noorder nopr nopro noprom nopromp noprompt norely noresetlogs noreverse normal norowdependencies noschemacheck noswitch not nothing notice notnull notrim novalidate now nowait nth_value nullif nulls num numb numbe nvarchar nvarchar2 object ocicoll ocidate ocidatetime ociduration ociinterval ociloblocator ocinumber ociref ocirefcursor ocirowid ocistring ocitype oct octet_length of off offline offset oid oidindex old on online only opaque open operations operator optimal optimize option optionally or oracle oracle_date oradata ord ordaudio orddicom orddoc order ordimage ordinality ordvideo organization orlany orlvary out outer outfile outline output over overflow overriding package pad parallel parallel_enable parameters parent parse partial partition partitions pascal passing password password_grace_time password_lock_time password_reuse_max password_reuse_time password_verify_function patch path patindex pctincrease pctthreshold pctused pctversion percent percent_rank percentile_cont percentile_disc performance period period_add period_diff permanent physical pi pipe pipelined pivot pluggable plugin policy position post_transaction pow power pragma prebuilt precedes preceding precision prediction prediction_cost prediction_details prediction_probability prediction_set prepare present preserve prior priority private private_sga privileges procedural procedure procedure_analyze processlist profiles project prompt protection public publishingservername purge quarter query quick quiesce quota quotename radians raise rand range rank raw read reads readsize rebuild record records recover recovery recursive recycle redo reduced ref reference referenced references referencing refresh regexp_like register regr_avgx regr_avgy regr_count regr_intercept regr_r2 regr_slope regr_sxx regr_sxy reject rekey relational relative relaylog release release_lock relies_on relocate rely rem remainder rename repair repeat replace replicate replication required reset resetlogs resize resource respect restore restricted result result_cache resumable resume retention return returning returns reuse reverse revoke right rlike role roles rollback rolling rollup round row row_count rowdependencies rowid rownum rows rtrim rules safe salt sample save savepoint sb1 sb2 sb4 scan schema schemacheck scn scope scroll sdo_georaster sdo_topo_geometry search sec_to_time second seconds section securefile security seed segment select self semi sequence sequential serializable server servererror session session_user sessions_per_user set sets settings sha sha1 sha2 share shared shared_pool short show shrink shutdown si_averagecolor si_colorhistogram si_featurelist si_positionalcolor si_stillimage si_texture siblings sid sign sin size size_t sizes skip slave sleep smalldatetimefromparts smallfile snapshot some soname sort soundex source space sparse spfile split sql sql_big_result sql_buffer_result sql_cache sql_calc_found_rows sql_small_result sql_variant_property sqlcode sqldata sqlerror sqlname sqlstate sqrt square standalone standby start starting startup statement static statistics stats_binomial_test stats_crosstab stats_ks_test stats_mode stats_mw_test stats_one_way_anova stats_t_test_ 
stats_t_test_indep stats_t_test_one stats_t_test_paired stats_wsr_test status std stddev stddev_pop stddev_samp stdev stop storage store stored str str_to_date straight_join strcmp strict string struct stuff style subdate subpartition subpartitions substitutable substr substring subtime subtring_index subtype success sum suspend switch switchoffset switchover sync synchronous synonym sys sys_xmlagg sysasm sysaux sysdate sysdatetimeoffset sysdba sysoper system system_user sysutcdatetime table tables tablespace tablesample tan tdo template temporary terminated tertiary_weights test than then thread through tier ties time time_format time_zone timediff timefromparts timeout timestamp timestampadd timestampdiff timezone_abbr timezone_minute timezone_region to to_base64 to_date to_days to_seconds todatetimeoffset trace tracking transaction transactional translate translation treat trigger trigger_nestlevel triggers trim truncate try_cast try_convert try_parse type ub1 ub2 ub4 ucase unarchived unbounded uncompress under undo unhex unicode uniform uninstall union unique unix_timestamp unknown unlimited unlock unnest unpivot unrecoverable unsafe unsigned until untrusted unusable unused update updated upgrade upped upper upsert url urowid usable usage use use_stored_outlines user user_data user_resources users using utc_date utc_timestamp uuid uuid_short validate validate_password_strength validation valist value values var var_samp varcharc vari varia variab variabl variable variables variance varp varraw varrawc varray verify version versions view virtual visible void wait wallet warning warnings week weekday weekofyear wellformed when whene whenev wheneve whenever where while whitespace window with within without work wrapped xdb xml xmlagg xmlattributes xmlcast xmlcolattval xmlelement xmlexists xmlforest xmlindex xmlnamespaces xmlpi xmlquery xmlroot xmlschema xmlserialize xmltable xmltype xor year year_to_month years yearweek",literal:"true false null unknown",built_in:"array bigint binary bit blob bool boolean char character date dec decimal float int int8 integer interval number numeric real record serial serial8 smallint text time timestamp tinyint varchar varying void"},c:[{cN:"string",b:"'",e:"'",c:[e.BE,{b:"''"}]},{cN:"string",b:'"',e:'"',c:[e.BE,{b:'""'}]},{cN:"string",b:"`",e:"`",c:[e.BE]},e.CNM,e.CBCM,t,e.HCM]},e.CBCM,t,e.HCM]}});hljs.registerLanguage("ini",function(e){var b={cN:"string",c:[e.BE],v:[{b:"'''",e:"'''",r:10},{b:'"""',e:'"""',r:10},{b:'"',e:'"'},{b:"'",e:"'"}]};return{aliases:["toml"],cI:!0,i:/\S/,c:[e.C(";","$"),e.HCM,{cN:"section",b:/^\s*\[+/,e:/\]+/},{b:/^[a-z0-9\[\]_\.-]+\s*=\s*/,e:"$",rB:!0,c:[{cN:"attr",b:/[a-z0-9\[\]_\.-]+/},{b:/=/,eW:!0,r:0,c:[e.C(";","$"),e.HCM,{cN:"literal",b:/\bon|off|true|false|yes|no\b/},{cN:"variable",v:[{b:/\$[\w\d"][\w\d_]*/},{b:/\$\{(.*?)}/}]},b,{cN:"number",b:/([\+\-]+)?[\d]+_[\d_]+/},e.NM]}]}]}});hljs.registerLanguage("rust",function(e){var t="([ui](8|16|32|64|128|size)|f(32|64))?",r="drop i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize f32 f64 str char bool Box Option Result String Vec Copy Send Sized Sync Drop Fn FnMut FnOnce ToOwned Clone Debug PartialEq PartialOrd Eq Ord AsRef AsMut Into From Default Iterator Extend IntoIterator DoubleEndedIterator ExactSizeIterator SliceConcatExt ToString assert! assert_eq! bitflags! bytes! cfg! col! concat! concat_idents! debug_assert! debug_assert_eq! env! panic! file! format! format_args! include_bin! include_str! line! local_data_key! module_path! option_env! print! println! select! 
stringify! try! unimplemented! unreachable! vec! write! writeln! macro_rules! assert_ne! debug_assert_ne!";return{aliases:["rs"],k:{keyword:"abstract as async await become box break const continue crate do dyn else enum extern false final fn for if impl in let loop macro match mod move mut override priv pub ref return self Self static struct super trait true try type typeof unsafe unsized use virtual where while yield",literal:"true false Some None Ok Err",built_in:r},l:e.IR+"!?",i:""}]}});hljs.registerLanguage("css",function(e){var c={b:/(?:[A-Z\_\.\-]+|--[a-zA-Z0-9_-]+)\s*:/,rB:!0,e:";",eW:!0,c:[{cN:"attribute",b:/\S/,e:":",eE:!0,starts:{eW:!0,eE:!0,c:[{b:/[\w-]+\(/,rB:!0,c:[{cN:"built_in",b:/[\w-]+/},{b:/\(/,e:/\)/,c:[e.ASM,e.QSM]}]},e.CSSNM,e.QSM,e.ASM,e.CBCM,{cN:"number",b:"#[0-9A-Fa-f]+"},{cN:"meta",b:"!important"}]}}]};return{cI:!0,i:/[=\/|'\$]/,c:[e.CBCM,{cN:"selector-id",b:/#[A-Za-z0-9_-]+/},{cN:"selector-class",b:/\.[A-Za-z0-9_-]+/},{cN:"selector-attr",b:/\[/,e:/\]/,i:"$"},{cN:"selector-pseudo",b:/:(:)?[a-zA-Z0-9\_\-\+\(\)"'.]+/},{b:"@(font-face|page)",l:"[a-z-]+",k:"font-face page"},{b:"@",e:"[{;]",i:/:/,c:[{cN:"keyword",b:/\w+/},{b:/\s/,eW:!0,eE:!0,r:0,c:[e.ASM,e.QSM,e.CSSNM]}]},{cN:"selector-tag",b:"[a-zA-Z-][a-zA-Z0-9_-]*",r:0},{b:"{",e:"}",i:/\S/,c:[e.CBCM,c]}]}});hljs.registerLanguage("objectivec",function(e){var t=/[a-zA-Z@][a-zA-Z0-9_]*/,_="@interface @class @protocol @implementation";return{aliases:["mm","objc","obj-c"],k:{keyword:"int float while char export sizeof typedef const struct for union unsigned long volatile static bool mutable if do return goto void enum else break extern asm case short default double register explicit signed typename this switch continue wchar_t inline readonly assign readwrite self @synchronized id typeof nonatomic super unichar IBOutlet IBAction strong weak copy in out inout bycopy byref oneway __strong __weak __block __autoreleasing @private @protected @public @try @property @end @throw @catch @finally @autoreleasepool @synthesize @dynamic @selector @optional @required @encode @package @import @defs @compatibility_alias __bridge __bridge_transfer __bridge_retained __bridge_retain __covariant __contravariant __kindof _Nonnull _Nullable _Null_unspecified __FUNCTION__ __PRETTY_FUNCTION__ __attribute__ getter setter retain unsafe_unretained nonnull nullable null_unspecified null_resettable class instancetype NS_DESIGNATED_INITIALIZER NS_UNAVAILABLE NS_REQUIRES_SUPER NS_RETURNS_INNER_POINTER NS_INLINE NS_AVAILABLE NS_DEPRECATED NS_ENUM NS_OPTIONS NS_SWIFT_UNAVAILABLE NS_ASSUME_NONNULL_BEGIN NS_ASSUME_NONNULL_END NS_REFINED_FOR_SWIFT NS_SWIFT_NAME NS_SWIFT_NOTHROW NS_DURING NS_HANDLER NS_ENDHANDLER NS_VALUERETURN NS_VOIDRETURN",literal:"false true FALSE TRUE nil YES NO NULL",built_in:"BOOL dispatch_once_t dispatch_queue_t dispatch_sync dispatch_async dispatch_once"},l:t,i:""}]}]},{cN:"class",b:"("+_.split(" ").join("|")+")\\b",e:"({|$)",eE:!0,k:_,l:t,c:[e.UTM]},{b:"\\."+e.UIR,r:0}]}});hljs.registerLanguage("apache",function(e){var r={cN:"number",b:"[\\$%]\\d+"};return{aliases:["apacheconf"],cI:!0,c:[e.HCM,{cN:"section",b:""},{cN:"attribute",b:/\w+/,r:0,k:{nomarkup:"order deny allow setenv rewriterule rewriteengine rewritecond documentroot sethandler errordocument loadmodule options header listen serverroot servername"},starts:{e:/$/,r:0,k:{literal:"on off all"},c:[{cN:"meta",b:"\\s\\[",e:"\\]$"},{cN:"variable",b:"[\\$%]\\{",e:"\\}",c:["self",r]},r,e.QSM]}}],i:/\S/}});hljs.registerLanguage("coffeescript",function(e){var c={keyword:"in if for 
while finally new do return else break catch instanceof throw try this switch continue typeof delete debugger super yield import export from as default await then unless until loop of by when and or is isnt not",literal:"true false null undefined yes no on off",built_in:"npm require console print module global window document"},n="[A-Za-z$_][0-9A-Za-z$_]*",r={cN:"subst",b:/#\{/,e:/}/,k:c},i=[e.BNM,e.inherit(e.CNM,{starts:{e:"(\\s*/)?",r:0}}),{cN:"string",v:[{b:/'''/,e:/'''/,c:[e.BE]},{b:/'/,e:/'/,c:[e.BE]},{b:/"""/,e:/"""/,c:[e.BE,r]},{b:/"/,e:/"/,c:[e.BE,r]}]},{cN:"regexp",v:[{b:"///",e:"///",c:[r,e.HCM]},{b:"//[gim]*",r:0},{b:/\/(?![ *])(\\\/|.)*?\/[gim]*(?=\W|$)/}]},{b:"@"+n},{sL:"javascript",eB:!0,eE:!0,v:[{b:"```",e:"```"},{b:"`",e:"`"}]}];r.c=i;var s=e.inherit(e.TM,{b:n}),t="(\\(.*\\))?\\s*\\B[-=]>",o={cN:"params",b:"\\([^\\(]",rB:!0,c:[{b:/\(/,e:/\)/,k:c,c:["self"].concat(i)}]};return{aliases:["coffee","cson","iced"],k:c,i:/\/\*/,c:i.concat([e.C("###","###"),e.HCM,{cN:"function",b:"^\\s*"+n+"\\s*=\\s*"+t,e:"[-=]>",rB:!0,c:[s,o]},{b:/[:\(,=]\s*/,r:0,c:[{cN:"function",b:t,e:"[-=]>",rB:!0,c:[o]}]},{cN:"class",bK:"class",e:"$",i:/[:="\[\]]/,c:[{bK:"extends",eW:!0,i:/[:="\[\]]/,c:[s]},s]},{b:n+":",e:":",rB:!0,rE:!0,r:0}])}});hljs.registerLanguage("swift",function(e){var i={keyword:"#available #colorLiteral #column #else #elseif #endif #file #fileLiteral #function #if #imageLiteral #line #selector #sourceLocation _ __COLUMN__ __FILE__ __FUNCTION__ __LINE__ Any as as! as? associatedtype associativity break case catch class continue convenience default defer deinit didSet do dynamic dynamicType else enum extension fallthrough false fileprivate final for func get guard if import in indirect infix init inout internal is lazy left let mutating nil none nonmutating open operator optional override postfix precedence prefix private protocol Protocol public repeat required rethrows return right self Self set static struct subscript super switch throw throws true try try! try? 
Type typealias unowned var weak where while willSet",literal:"true false nil",built_in:"abs advance alignof alignofValue anyGenerator assert assertionFailure bridgeFromObjectiveC bridgeFromObjectiveCUnconditional bridgeToObjectiveC bridgeToObjectiveCUnconditional c contains count countElements countLeadingZeros debugPrint debugPrintln distance dropFirst dropLast dump encodeBitsAsWords enumerate equal fatalError filter find getBridgedObjectiveCType getVaList indices insertionSort isBridgedToObjectiveC isBridgedVerbatimToObjectiveC isUniquelyReferenced isUniquelyReferencedNonObjC join lazy lexicographicalCompare map max maxElement min minElement numericCast overlaps partition posix precondition preconditionFailure print println quickSort readLine reduce reflect reinterpretCast reverse roundUpToAlignment sizeof sizeofValue sort split startsWith stride strideof strideofValue swap toString transcode underestimateCount unsafeAddressOf unsafeBitCast unsafeDowncast unsafeUnwrap unsafeReflect withExtendedLifetime withObjectAtPlusZero withUnsafePointer withUnsafePointerToObject withUnsafeMutablePointer withUnsafeMutablePointers withUnsafePointer withUnsafePointers withVaList zip"},t=e.C("/\\*","\\*/",{c:["self"]}),n={cN:"subst",b:/\\\(/,e:"\\)",k:i,c:[]},r={cN:"string",c:[e.BE,n],v:[{b:/"""/,e:/"""/},{b:/"/,e:/"/}]},a={cN:"number",b:"\\b([\\d_]+(\\.[\\deE_]+)?|0x[a-fA-F0-9_]+(\\.[a-fA-F0-9p_]+)?|0b[01_]+|0o[0-7_]+)\\b",r:0};return n.c=[a],{k:i,c:[r,e.CLCM,t,{cN:"type",b:"\\b[A-Z][\\wÀ-ʸ']*[!?]"},{cN:"type",b:"\\b[A-Z][\\wÀ-ʸ']*",r:0},a,{cN:"function",bK:"func",e:"{",eE:!0,c:[e.inherit(e.TM,{b:/[A-Za-z$_][0-9A-Za-z$_]*/}),{b://},{cN:"params",b:/\(/,e:/\)/,endsParent:!0,k:i,c:["self",a,r,e.CBCM,{b:":"}],i:/["']/}],i:/\[|%/},{cN:"class",bK:"struct protocol class extension enum",k:i,e:"\\{",eE:!0,c:[e.inherit(e.TM,{b:/[A-Za-z$_][\u00C0-\u02B80-9A-Za-z$_]*/})]},{cN:"meta",b:"(@discardableResult|@warn_unused_result|@exported|@lazy|@noescape|@NSCopying|@NSManaged|@objc|@objcMembers|@convention|@required|@noreturn|@IBAction|@IBDesignable|@IBInspectable|@IBOutlet|@infix|@prefix|@postfix|@autoclosure|@testable|@available|@nonobjc|@NSApplicationMain|@UIApplicationMain)"},{bK:"import",e:/$/,c:[e.CLCM,t]}]}});hljs.registerLanguage("cpp",function(t){var e={cN:"keyword",b:"\\b[a-z\\d_]*_t\\b"},r={cN:"string",v:[{b:'(u8?|U|L)?"',e:'"',i:"\\n",c:[t.BE]},{b:/(?:u8?|U|L)?R"([^()\\ ]{0,16})\((?:.|\n)*?\)\1"/},{b:"'\\\\?.",e:"'",i:"."}]},s={cN:"number",v:[{b:"\\b(0b[01']+)"},{b:"(-?)\\b([\\d']+(\\.[\\d']*)?|\\.[\\d']+)(u|U|l|L|ul|UL|f|F|b|B)"},{b:"(-?)(\\b0[xX][a-fA-F0-9']+|(\\b[\\d']+(\\.[\\d']*)?|\\.[\\d']+)([eE][-+]?[\\d']+)?)"}],r:0},i={cN:"meta",b:/#\s*[a-z]+\b/,e:/$/,k:{"meta-keyword":"if else elif endif define undef warning error line pragma ifdef ifndef include"},c:[{b:/\\\n/,r:0},t.inherit(r,{cN:"meta-string"}),{cN:"meta-string",b:/<[^\n>]*>/,e:/$/,i:"\\n"},t.CLCM,t.CBCM]},a=t.IR+"\\s*\\(",c={keyword:"int float while private char catch import module export virtual operator sizeof dynamic_cast|10 typedef const_cast|10 const for static_cast|10 union namespace unsigned long volatile static protected bool template mutable if public friend do goto auto void enum else break extern using asm case typeid short reinterpret_cast|10 default double register explicit signed typename try this switch continue inline delete alignof constexpr decltype noexcept static_assert thread_local restrict _Bool complex _Complex _Imaginary atomic_bool atomic_char atomic_schar atomic_uchar atomic_short atomic_ushort atomic_int atomic_uint 
atomic_long atomic_ulong atomic_llong atomic_ullong new throw return and or not",built_in:"std string cin cout cerr clog stdin stdout stderr stringstream istringstream ostringstream auto_ptr deque list queue stack vector map set bitset multiset multimap unordered_set unordered_map unordered_multiset unordered_multimap array shared_ptr abort abs acos asin atan2 atan calloc ceil cosh cos exit exp fabs floor fmod fprintf fputs free frexp fscanf isalnum isalpha iscntrl isdigit isgraph islower isprint ispunct isspace isupper isxdigit tolower toupper labs ldexp log10 log malloc realloc memchr memcmp memcpy memset modf pow printf putchar puts scanf sinh sin snprintf sprintf sqrt sscanf strcat strchr strcmp strcpy strcspn strlen strncat strncmp strncpy strpbrk strrchr strspn strstr tanh tan vfprintf vprintf vsprintf endl initializer_list unique_ptr",literal:"true false nullptr NULL"},n=[e,t.CLCM,t.CBCM,s,r];return{aliases:["c","cc","h","c++","h++","hpp","hh","hxx","cxx"],k:c,i:"",k:c,c:["self",e]},{b:t.IR+"::",k:c},{v:[{b:/=/,e:/;/},{b:/\(/,e:/\)/},{bK:"new throw return else",e:/;/}],k:c,c:n.concat([{b:/\(/,e:/\)/,k:c,c:n.concat(["self"]),r:0}]),r:0},{cN:"function",b:"("+t.IR+"[\\*&\\s]+)+"+a,rB:!0,e:/[{;=]/,eE:!0,k:c,i:/[^\w\s\*&]/,c:[{b:a,rB:!0,c:[t.TM],r:0},{cN:"params",b:/\(/,e:/\)/,k:c,r:0,c:[t.CLCM,t.CBCM,r,s,e,{b:/\(/,e:/\)/,k:c,r:0,c:["self",t.CLCM,t.CBCM,r,s,e]}]},t.CLCM,t.CBCM,i]},{cN:"class",bK:"class struct",e:/[{;:]/,c:[{b://,c:["self"]},t.TM]}]),exports:{preprocessor:i,strings:r,k:c}}});hljs.registerLanguage("java",function(e){var a="false synchronized int abstract float private char boolean var static null if const for true while long strictfp finally protected import native final void enum else break transient catch instanceof byte super volatile case assert short package default double public try this switch continue throws protected public private module requires exports do",t={cN:"number",b:"\\b(0[bB]([01]+[01_]+[01]+|[01]+)|0[xX]([a-fA-F0-9]+[a-fA-F0-9_]+[a-fA-F0-9]+|[a-fA-F0-9]+)|(([\\d]+[\\d_]+[\\d]+|[\\d]+)(\\.([\\d]+[\\d_]+[\\d]+|[\\d]+))?|\\.([\\d]+[\\d_]+[\\d]+|[\\d]+))([eE][-+]?\\d+)?)[lLfF]?",r:0};return{aliases:["jsp"],k:a,i:/<\/|#/,c:[e.C("/\\*\\*","\\*/",{r:0,c:[{b:/\w+@/,r:0},{cN:"doctag",b:"@[A-Za-z]+"}]}),e.CLCM,e.CBCM,e.ASM,e.QSM,{cN:"class",bK:"class interface",e:/[{;=]/,eE:!0,k:"class interface",i:/[:"\[\]]/,c:[{bK:"extends implements"},e.UTM]},{bK:"new throw return else",r:0},{cN:"function",b:"([À-ʸa-zA-Z_$][À-ʸa-zA-Z_$0-9]*(<[À-ʸa-zA-Z_$][À-ʸa-zA-Z_$0-9]*(\\s*,\\s*[À-ʸa-zA-Z_$][À-ʸa-zA-Z_$0-9]*)*>)?\\s+)+"+e.UIR+"\\s*\\(",rB:!0,e:/[{;=]/,eE:!0,k:a,c:[{b:e.UIR+"\\s*\\(",rB:!0,r:0,c:[e.UTM]},{cN:"params",b:/\(/,e:/\)/,k:a,r:0,c:[e.ASM,e.QSM,e.CNM,e.CBCM]},e.CLCM,e.CBCM]},t,{cN:"meta",b:"@[A-Za-z]+"}]}});hljs.registerLanguage("python",function(e){var r={keyword:"and elif is global as in if from raise for except finally print import pass return exec else break not with class assert yield try while continue del or def lambda async await nonlocal|10",built_in:"Ellipsis NotImplemented",literal:"False None True"},b={cN:"meta",b:/^(>>>|\.\.\.) 
/},c={cN:"subst",b:/\{/,e:/\}/,k:r,i:/#/},a={cN:"string",c:[e.BE],v:[{b:/(u|b)?r?'''/,e:/'''/,c:[e.BE,b],r:10},{b:/(u|b)?r?"""/,e:/"""/,c:[e.BE,b],r:10},{b:/(fr|rf|f)'''/,e:/'''/,c:[e.BE,b,c]},{b:/(fr|rf|f)"""/,e:/"""/,c:[e.BE,b,c]},{b:/(u|r|ur)'/,e:/'/,r:10},{b:/(u|r|ur)"/,e:/"/,r:10},{b:/(b|br)'/,e:/'/},{b:/(b|br)"/,e:/"/},{b:/(fr|rf|f)'/,e:/'/,c:[e.BE,c]},{b:/(fr|rf|f)"/,e:/"/,c:[e.BE,c]},e.ASM,e.QSM]},i={cN:"number",r:0,v:[{b:e.BNR+"[lLjJ]?"},{b:"\\b(0o[0-7]+)[lLjJ]?"},{b:e.CNR+"[lLjJ]?"}]},l={cN:"params",b:/\(/,e:/\)/,c:["self",b,i,a]};return c.c=[a,i,b],{aliases:["py","gyp","ipython"],k:r,i:/(<\/|->|\?)|=>/,c:[b,i,a,e.HCM,{v:[{cN:"function",bK:"def"},{cN:"class",bK:"class"}],e:/:/,i:/[${=;\n,]/,c:[e.UTM,l,{b:/->/,eW:!0,k:"None"}]},{cN:"meta",b:/^[\t ]*@/,e:/$/},{b:/\b(print|exec)\(/}]}});hljs.registerLanguage("haskell",function(e){var i={v:[e.C("--","$"),e.C("{-","-}",{c:["self"]})]},a={cN:"meta",b:"{-#",e:"#-}"},l={cN:"meta",b:"^#",e:"$"},c={cN:"type",b:"\\b[A-Z][\\w']*",r:0},n={b:"\\(",e:"\\)",i:'"',c:[a,l,{cN:"type",b:"\\b[A-Z][\\w]*(\\((\\.\\.|,|\\w+)\\))?"},e.inherit(e.TM,{b:"[_a-z][\\w']*"}),i]};return{aliases:["hs"],k:"let in if then else case of where do module import hiding qualified type data newtype deriving class instance as default infix infixl infixr foreign export ccall stdcall cplusplus jvm dotnet safe unsafe family forall mdo proc rec",c:[{bK:"module",e:"where",k:"module where",c:[n,i],i:"\\W\\.|;"},{b:"\\bimport\\b",e:"$",k:"import qualified as hiding",c:[n,i],i:"\\W\\.|;"},{cN:"class",b:"^(\\s*)?(class|instance)\\b",e:"where",k:"class family instance where",c:[c,n,i]},{cN:"class",b:"\\b(data|(new)?type)\\b",e:"$",k:"data family type newtype deriving",c:[a,c,n,{b:"{",e:"}",c:n.c},i]},{bK:"default",e:"$",c:[c,n,i]},{bK:"infix infixl infixr",e:"$",c:[e.CNM,i]},{b:"\\bforeign\\b",e:"$",k:"foreign import export ccall stdcall cplusplus jvm dotnet safe unsafe",c:[c,e.QSM,i]},{cN:"meta",b:"#!\\/usr\\/bin\\/env runhaskell",e:"$"},a,l,e.QSM,e.CNM,c,e.inherit(e.TM,{b:"^[_a-z][\\w']*"}),i,{b:"->|<-"}]}});hljs.registerLanguage("bash",function(e){var t={cN:"variable",v:[{b:/\$[\w\d#@][\w\d_]*/},{b:/\$\{(.*?)}/}]},s={cN:"string",b:/"/,e:/"/,c:[e.BE,t,{cN:"variable",b:/\$\(/,e:/\)/,c:[e.BE]}]};return{aliases:["sh","zsh"],l:/\b-?[a-z\._]+\b/,k:{keyword:"if then else elif fi for while in do done case esac function",literal:"true false",built_in:"break cd continue eval exec exit export getopts hash pwd readonly return shift test times trap umask unset alias bind builtin caller command declare echo enable help let local logout mapfile printf read readarray source type typeset ulimit unalias set shopt autoload bg bindkey bye cap chdir clone comparguments compcall compctl compdescribe compfiles compgroups compquote comptags comptry compvalues dirs disable disown echotc echoti emulate fc fg float functions getcap getln history integer jobs kill limit log noglob popd print pushd pushln rehash sched setcap setopt stat suspend ttyctl unfunction unhash unlimit unsetopt vared wait whence where which zcompile zformat zftp zle zmodload zparseopts zprof zpty zregexparse zsocket zstyle ztcp",_:"-ne -eq -lt -gt -f -d -e -s -l 
-a"},c:[{cN:"meta",b:/^#![^\n]+sh\s*$/,r:10},{cN:"function",b:/\w[\w\d_]*\s*\(\s*\)\s*\{/,rB:!0,c:[e.inherit(e.TM,{b:/\w[\w\d_]*/})],r:0},e.HCM,s,{cN:"",b:/\\"/},{cN:"string",b:/'/,e:/'/},t]}});hljs.registerLanguage("shell",function(s){return{aliases:["console"],c:[{cN:"meta",b:"^\\s{0,3}[\\w\\d\\[\\]()@-]*[>%$#]",starts:{e:"$",sL:"bash"}}]}});hljs.registerLanguage("diff",function(e){return{aliases:["patch"],c:[{cN:"meta",r:10,v:[{b:/^@@ +\-\d+,\d+ +\+\d+,\d+ +@@$/},{b:/^\*\*\* +\d+,\d+ +\*\*\*\*$/},{b:/^\-\-\- +\d+,\d+ +\-\-\-\-$/}]},{cN:"comment",v:[{b:/Index: /,e:/$/},{b:/={3,}/,e:/$/},{b:/^\-{3}/,e:/$/},{b:/^\*{3} /,e:/$/},{b:/^\+{3}/,e:/$/},{b:/\*{5}/,e:/\*{5}$/}]},{cN:"addition",b:"^\\+",e:"$"},{cN:"deletion",b:"^\\-",e:"$"},{cN:"addition",b:"^\\!",e:"$"}]}});hljs.registerLanguage("perl",function(e){var t="getpwent getservent quotemeta msgrcv scalar kill dbmclose undef lc ma syswrite tr send umask sysopen shmwrite vec qx utime local oct semctl localtime readpipe do return format read sprintf dbmopen pop getpgrp not getpwnam rewinddir qqfileno qw endprotoent wait sethostent bless s|0 opendir continue each sleep endgrent shutdown dump chomp connect getsockname die socketpair close flock exists index shmgetsub for endpwent redo lstat msgctl setpgrp abs exit select print ref gethostbyaddr unshift fcntl syscall goto getnetbyaddr join gmtime symlink semget splice x|0 getpeername recv log setsockopt cos last reverse gethostbyname getgrnam study formline endhostent times chop length gethostent getnetent pack getprotoent getservbyname rand mkdir pos chmod y|0 substr endnetent printf next open msgsnd readdir use unlink getsockopt getpriority rindex wantarray hex system getservbyport endservent int chr untie rmdir prototype tell listen fork shmread ucfirst setprotoent else sysseek link getgrgid shmctl waitpid unpack getnetbyname reset chdir grep split require caller lcfirst until warn while values shift telldir getpwuid my getprotobynumber delete and sort uc defined srand accept package seekdir getprotobyname semop our rename seek if q|0 chroot sysread setpwent no crypt getc chown sqrt write setnetent setpriority foreach tie sin msgget map stat getlogin unless elsif truncate exec keys glob tied closedirioctl socket readlink eval xor readline binmode setservent eof ord bind alarm pipe atan2 getgrent exp time push setgrent gt lt or ne m|0 break given say state when",r={cN:"subst",b:"[$@]\\{",e:"\\}",k:t},s={b:"->{",e:"}"},n={v:[{b:/\$\d/},{b:/[\$%@](\^\w\b|#\w+(::\w+)*|{\w+}|\w+(::\w*)*)/},{b:/[\$%@][^\s\w{]/,r:0}]},i=[e.BE,r,n],o=[n,e.HCM,e.C("^\\=\\w","\\=cut",{eW:!0}),s,{cN:"string",c:i,v:[{b:"q[qwxr]?\\s*\\(",e:"\\)",r:5},{b:"q[qwxr]?\\s*\\[",e:"\\]",r:5},{b:"q[qwxr]?\\s*\\{",e:"\\}",r:5},{b:"q[qwxr]?\\s*\\|",e:"\\|",r:5},{b:"q[qwxr]?\\s*\\<",e:"\\>",r:5},{b:"qw\\s+q",e:"q",r:5},{b:"'",e:"'",c:[e.BE]},{b:'"',e:'"'},{b:"`",e:"`",c:[e.BE]},{b:"{\\w+}",c:[],r:0},{b:"-?\\w+\\s*\\=\\>",c:[],r:0}]},{cN:"number",b:"(\\b0[0-7_]+)|(\\b0x[0-9a-fA-F_]+)|(\\b[1-9][0-9_]*(\\.[0-9_]+)?)|[0_]\\b",r:0},{b:"(\\/\\/|"+e.RSR+"|\\b(split|return|print|reverse|grep)\\b)\\s*",k:"split return print reverse grep",r:0,c:[e.HCM,{cN:"regexp",b:"(s|tr|y)/(\\\\.|[^/])*/(\\\\.|[^/])*/[a-z]*",r:10},{cN:"regexp",b:"(m|qr)?/",e:"/[a-z]*",c:[e.BE],r:0}]},{cN:"function",bK:"sub",e:"(\\s*\\(.*?\\))?[;{]",eE:!0,r:5,c:[e.TM]},{b:"-\\w\\b",r:0},{b:"^__DATA__$",e:"^__END__$",sL:"mojolicious",c:[{b:"^@@.*",e:"$",cN:"comment"}]}];return 
r.c=o,{aliases:["pl","pm"],l:/[\w\.]+/,k:t,c:s.c=o}});hljs.registerLanguage("makefile",function(e){var i={cN:"variable",v:[{b:"\\$\\("+e.UIR+"\\)",c:[e.BE]},{b:/\$[@%)?(\\[\\])?";return{aliases:["csharp","c#"],k:i,i:/::/,c:[e.C("///","$",{rB:!0,c:[{cN:"doctag",v:[{b:"///",r:0},{b:"\x3c!--|--\x3e"},{b:""}]}]}),e.CLCM,e.CBCM,{cN:"meta",b:"#",e:"$",k:{"meta-keyword":"if else elif endif define undef warning error line region endregion pragma checksum"}},o,r,{bK:"class interface",e:/[{;=]/,i:/[^\s:,]/,c:[e.TM,e.CLCM,e.CBCM]},{bK:"namespace",e:/[{;=]/,i:/[^\s:]/,c:[e.inherit(e.TM,{b:"[a-zA-Z](\\.?\\w)*"}),e.CLCM,e.CBCM]},{cN:"meta",b:"^\\s*\\[",eB:!0,e:"\\]",eE:!0,c:[{cN:"meta-string",b:/"/,e:/"/}]},{bK:"new return throw await else",r:0},{cN:"function",b:"("+d+"\\s+)+"+e.IR+"\\s*\\(",rB:!0,e:/\s*[{;=]/,eE:!0,k:i,c:[{b:e.IR+"\\s*\\(",rB:!0,c:[e.TM],r:0},{cN:"params",b:/\(/,e:/\)/,eB:!0,eE:!0,k:i,r:0,c:[o,r,e.CBCM]},e.CLCM,e.CBCM]}]}});hljs.registerLanguage("handlebars",function(e){var a={"builtin-name":"each in with if else unless bindattr action collection debugger log outlet template unbound view yield"};return{aliases:["hbs","html.hbs","html.handlebars"],cI:!0,sL:"xml",c:[e.C("{{!(--)?","(--)?}}"),{cN:"template-tag",b:/\{\{[#\/]/,e:/\}\}/,c:[{cN:"name",b:/[a-zA-Z\.-]+/,k:a,starts:{eW:!0,r:0,c:[e.QSM]}}]},{cN:"template-variable",b:/\{\{/,e:/\}\}/,k:a}]}});hljs.registerLanguage("http",function(e){var t="HTTP/[0-9\\.]+";return{aliases:["https"],i:"\\S",c:[{b:"^"+t,e:"$",c:[{cN:"number",b:"\\b\\d{3}\\b"}]},{b:"^[A-Z]+ (.*?) "+t+"$",rB:!0,e:"$",c:[{cN:"string",b:" ",e:" ",eB:!0,eE:!0},{b:t},{cN:"keyword",b:"[A-Z]+"}]},{cN:"attribute",b:"^\\w",e:": ",eE:!0,i:"\\n|\\s|=",starts:{e:"$",r:0}},{b:"\\n\\n",starts:{sL:[],eW:!0}}]}}); \ No newline at end of file diff --git a/perf-guide/index.html b/perf-guide/index.html new file mode 100644 index 000000000..47f555e4c --- /dev/null +++ b/perf-guide/index.html @@ -0,0 +1,238 @@ + + + + + + Introduction - Rust SIMD Performance Guide + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + + + + +
+
+

Introduction

+

What is SIMD

+ +

History of SIMD in Rust

+ +

Discover packed_simd

+ +

Writing fast and portable SIMD algorithms using packed_simd is, unfortunately, +not trivial. There are many pitfalls that one should be aware of, and some idioms +that help avoid those pitfalls.

+

This book attempts to document these best practices, and provides practical examples +of how to apply these tips to your code.

+ +
+ + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/perf-guide/introduction.html b/perf-guide/introduction.html new file mode 100644 index 000000000..0341aa87d --- /dev/null +++ b/perf-guide/introduction.html @@ -0,0 +1,238 @@ + + + + + + Introduction - Rust SIMD Performance Guide + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + + + + +
+
+

Introduction

+

What is SIMD

+ +

History of SIMD in Rust

+ +

Discover packed_simd

+ +

Writing fast and portable SIMD algorithms using packed_simd is, unfortunately, +not trivial. There are many pitfalls that one should be aware of, and some idioms +that help avoid those pitfalls.

+

This book attempts to document these best practices, and provides practical examples +of how to apply these tips to your code.

+ +
+ + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/perf-guide/mark.min.js b/perf-guide/mark.min.js new file mode 100644 index 000000000..163623188 --- /dev/null +++ b/perf-guide/mark.min.js @@ -0,0 +1,7 @@ +/*!*************************************************** +* mark.js v8.11.1 +* https://markjs.io/ +* Copyright (c) 2014–2018, Julian Kühnel +* Released under the MIT license https://git.io/vwTVl +*****************************************************/ +!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):e.Mark=t()}(this,function(){"use strict";var e="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},t=function(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")},n=function(){function e(e,t){for(var n=0;n1&&void 0!==arguments[1])||arguments[1],i=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[],o=arguments.length>3&&void 0!==arguments[3]?arguments[3]:5e3;t(this,e),this.ctx=n,this.iframes=r,this.exclude=i,this.iframesTimeout=o}return n(e,[{key:"getContexts",value:function(){var e=[];return(void 0!==this.ctx&&this.ctx?NodeList.prototype.isPrototypeOf(this.ctx)?Array.prototype.slice.call(this.ctx):Array.isArray(this.ctx)?this.ctx:"string"==typeof this.ctx?Array.prototype.slice.call(document.querySelectorAll(this.ctx)):[this.ctx]:[]).forEach(function(t){var n=e.filter(function(e){return e.contains(t)}).length>0;-1!==e.indexOf(t)||n||e.push(t)}),e}},{key:"getIframeContents",value:function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:function(){},r=void 0;try{var i=e.contentWindow;if(r=i.document,!i||!r)throw new Error("iframe inaccessible")}catch(e){n()}r&&t(r)}},{key:"isIframeBlank",value:function(e){var t="about:blank",n=e.getAttribute("src").trim();return e.contentWindow.location.href===t&&n!==t&&n}},{key:"observeIframeLoad",value:function(e,t,n){var r=this,i=!1,o=null,a=function a(){if(!i){i=!0,clearTimeout(o);try{r.isIframeBlank(e)||(e.removeEventListener("load",a),r.getIframeContents(e,t,n))}catch(e){n()}}};e.addEventListener("load",a),o=setTimeout(a,this.iframesTimeout)}},{key:"onIframeReady",value:function(e,t,n){try{"complete"===e.contentWindow.document.readyState?this.isIframeBlank(e)?this.observeIframeLoad(e,t,n):this.getIframeContents(e,t,n):this.observeIframeLoad(e,t,n)}catch(e){n()}}},{key:"waitForIframes",value:function(e,t){var n=this,r=0;this.forEachIframe(e,function(){return!0},function(e){r++,n.waitForIframes(e.querySelector("html"),function(){--r||t()})},function(e){e||t()})}},{key:"forEachIframe",value:function(t,n,r){var i=this,o=arguments.length>3&&void 0!==arguments[3]?arguments[3]:function(){},a=t.querySelectorAll("iframe"),s=a.length,c=0;a=Array.prototype.slice.call(a);var u=function(){--s<=0&&o(c)};s||u(),a.forEach(function(t){e.matches(t,i.exclude)?u():i.onIframeReady(t,function(e){n(t)&&(c++,r(e)),u()},u)})}},{key:"createIterator",value:function(e,t,n){return document.createNodeIterator(e,t,n,!1)}},{key:"createInstanceOnIframe",value:function(t){return new e(t.querySelector("html"),this.iframes)}},{key:"compareNodeIframe",value:function(e,t,n){if(e.compareDocumentPosition(n)&Node.DOCUMENT_POSITION_PRECEDING){if(null===t)return!0;if(t.compareDocumentPosition(n)&Node.DOCUMENT_POSITION_FOLLOWING)return!0}return!1}},{key:"getIteratorNode",value:function(e){var 
t=e.previousNode();return{prevNode:t,node:null===t?e.nextNode():e.nextNode()&&e.nextNode()}}},{key:"checkIframeFilter",value:function(e,t,n,r){var i=!1,o=!1;return r.forEach(function(e,t){e.val===n&&(i=t,o=e.handled)}),this.compareNodeIframe(e,t,n)?(!1!==i||o?!1===i||o||(r[i].handled=!0):r.push({val:n,handled:!0}),!0):(!1===i&&r.push({val:n,handled:!1}),!1)}},{key:"handleOpenIframes",value:function(e,t,n,r){var i=this;e.forEach(function(e){e.handled||i.getIframeContents(e.val,function(e){i.createInstanceOnIframe(e).forEachNode(t,n,r)})})}},{key:"iterateThroughNodes",value:function(e,t,n,r,i){for(var o,a=this,s=this.createIterator(t,e,r),c=[],u=[],l=void 0,h=void 0;void 0,o=a.getIteratorNode(s),h=o.prevNode,l=o.node;)this.iframes&&this.forEachIframe(t,function(e){return a.checkIframeFilter(l,h,e,c)},function(t){a.createInstanceOnIframe(t).forEachNode(e,function(e){return u.push(e)},r)}),u.push(l);u.forEach(function(e){n(e)}),this.iframes&&this.handleOpenIframes(c,e,n,r),i()}},{key:"forEachNode",value:function(e,t,n){var r=this,i=arguments.length>3&&void 0!==arguments[3]?arguments[3]:function(){},o=this.getContexts(),a=o.length;a||i(),o.forEach(function(o){var s=function(){r.iterateThroughNodes(e,o,t,n,function(){--a<=0&&i()})};r.iframes?r.waitForIframes(o,s):s()})}}],[{key:"matches",value:function(e,t){var n="string"==typeof t?[t]:t,r=e.matches||e.matchesSelector||e.msMatchesSelector||e.mozMatchesSelector||e.oMatchesSelector||e.webkitMatchesSelector;if(r){var i=!1;return n.every(function(t){return!r.call(e,t)||(i=!0,!1)}),i}return!1}}]),e}(),o=function(){function e(n){t(this,e),this.opt=r({},{diacritics:!0,synonyms:{},accuracy:"partially",caseSensitive:!1,ignoreJoiners:!1,ignorePunctuation:[],wildcards:"disabled"},n)}return n(e,[{key:"create",value:function(e){return"disabled"!==this.opt.wildcards&&(e=this.setupWildcardsRegExp(e)),e=this.escapeStr(e),Object.keys(this.opt.synonyms).length&&(e=this.createSynonymsRegExp(e)),(this.opt.ignoreJoiners||this.opt.ignorePunctuation.length)&&(e=this.setupIgnoreJoinersRegExp(e)),this.opt.diacritics&&(e=this.createDiacriticsRegExp(e)),e=this.createMergedBlanksRegExp(e),(this.opt.ignoreJoiners||this.opt.ignorePunctuation.length)&&(e=this.createJoinersRegExp(e)),"disabled"!==this.opt.wildcards&&(e=this.createWildcardsRegExp(e)),e=this.createAccuracyRegExp(e),new RegExp(e,"gm"+(this.opt.caseSensitive?"":"i"))}},{key:"escapeStr",value:function(e){return e.replace(/[\-\[\]\/\{\}\(\)\*\+\?\.\\\^\$\|]/g,"\\$&")}},{key:"createSynonymsRegExp",value:function(e){var t=this.opt.synonyms,n=this.opt.caseSensitive?"":"i",r=this.opt.ignoreJoiners||this.opt.ignorePunctuation.length?"\0":"";for(var i in t)if(t.hasOwnProperty(i)){var o=t[i],a="disabled"!==this.opt.wildcards?this.setupWildcardsRegExp(i):this.escapeStr(i),s="disabled"!==this.opt.wildcards?this.setupWildcardsRegExp(o):this.escapeStr(o);""!==a&&""!==s&&(e=e.replace(new RegExp("("+this.escapeStr(a)+"|"+this.escapeStr(s)+")","gm"+n),r+"("+this.processSynonyms(a)+"|"+this.processSynonyms(s)+")"+r))}return e}},{key:"processSynonyms",value:function(e){return(this.opt.ignoreJoiners||this.opt.ignorePunctuation.length)&&(e=this.setupIgnoreJoinersRegExp(e)),e}},{key:"setupWildcardsRegExp",value:function(e){return(e=e.replace(/(?:\\)*\?/g,function(e){return"\\"===e.charAt(0)?"?":""})).replace(/(?:\\)*\*/g,function(e){return"\\"===e.charAt(0)?"*":""})}},{key:"createWildcardsRegExp",value:function(e){var t="withSpaces"===this.opt.wildcards;return 
e.replace(/\u0001/g,t?"[\\S\\s]?":"\\S?").replace(/\u0002/g,t?"[\\S\\s]*?":"\\S*")}},{key:"setupIgnoreJoinersRegExp",value:function(e){return e.replace(/[^(|)\\]/g,function(e,t,n){var r=n.charAt(t+1);return/[(|)\\]/.test(r)||""===r?e:e+"\0"})}},{key:"createJoinersRegExp",value:function(e){var t=[],n=this.opt.ignorePunctuation;return Array.isArray(n)&&n.length&&t.push(this.escapeStr(n.join(""))),this.opt.ignoreJoiners&&t.push("\\u00ad\\u200b\\u200c\\u200d"),t.length?e.split(/\u0000+/).join("["+t.join("")+"]*"):e}},{key:"createDiacriticsRegExp",value:function(e){var t=this.opt.caseSensitive?"":"i",n=this.opt.caseSensitive?["aàáảãạăằắẳẵặâầấẩẫậäåāą","AÀÁẢÃẠĂẰẮẲẴẶÂẦẤẨẪẬÄÅĀĄ","cçćč","CÇĆČ","dđď","DĐĎ","eèéẻẽẹêềếểễệëěēę","EÈÉẺẼẸÊỀẾỂỄỆËĚĒĘ","iìíỉĩịîïī","IÌÍỈĨỊÎÏĪ","lł","LŁ","nñňń","NÑŇŃ","oòóỏõọôồốổỗộơởỡớờợöøō","OÒÓỎÕỌÔỒỐỔỖỘƠỞỠỚỜỢÖØŌ","rř","RŘ","sšśșş","SŠŚȘŞ","tťțţ","TŤȚŢ","uùúủũụưừứửữựûüůū","UÙÚỦŨỤƯỪỨỬỮỰÛÜŮŪ","yýỳỷỹỵÿ","YÝỲỶỸỴŸ","zžżź","ZŽŻŹ"]:["aàáảãạăằắẳẵặâầấẩẫậäåāąAÀÁẢÃẠĂẰẮẲẴẶÂẦẤẨẪẬÄÅĀĄ","cçćčCÇĆČ","dđďDĐĎ","eèéẻẽẹêềếểễệëěēęEÈÉẺẼẸÊỀẾỂỄỆËĚĒĘ","iìíỉĩịîïīIÌÍỈĨỊÎÏĪ","lłLŁ","nñňńNÑŇŃ","oòóỏõọôồốổỗộơởỡớờợöøōOÒÓỎÕỌÔỒỐỔỖỘƠỞỠỚỜỢÖØŌ","rřRŘ","sšśșşSŠŚȘŞ","tťțţTŤȚŢ","uùúủũụưừứửữựûüůūUÙÚỦŨỤƯỪỨỬỮỰÛÜŮŪ","yýỳỷỹỵÿYÝỲỶỸỴŸ","zžżźZŽŻŹ"],r=[];return e.split("").forEach(function(i){n.every(function(n){if(-1!==n.indexOf(i)){if(r.indexOf(n)>-1)return!1;e=e.replace(new RegExp("["+n+"]","gm"+t),"["+n+"]"),r.push(n)}return!0})}),e}},{key:"createMergedBlanksRegExp",value:function(e){return e.replace(/[\s]+/gim,"[\\s]+")}},{key:"createAccuracyRegExp",value:function(e){var t=this,n=this.opt.accuracy,r="string"==typeof n?n:n.value,i="";switch(("string"==typeof n?[]:n.limiters).forEach(function(e){i+="|"+t.escapeStr(e)}),r){case"partially":default:return"()("+e+")";case"complementary":return"()([^"+(i="\\s"+(i||this.escapeStr("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~¡¿")))+"]*"+e+"[^"+i+"]*)";case"exactly":return"(^|\\s"+i+")("+e+")(?=$|\\s"+i+")"}}}]),e}(),a=function(){function a(e){t(this,a),this.ctx=e,this.ie=!1;var n=window.navigator.userAgent;(n.indexOf("MSIE")>-1||n.indexOf("Trident")>-1)&&(this.ie=!0)}return n(a,[{key:"log",value:function(t){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"debug",r=this.opt.log;this.opt.debug&&"object"===(void 0===r?"undefined":e(r))&&"function"==typeof r[n]&&r[n]("mark.js: "+t)}},{key:"getSeparatedKeywords",value:function(e){var t=this,n=[];return e.forEach(function(e){t.opt.separateWordSearch?e.split(" ").forEach(function(e){e.trim()&&-1===n.indexOf(e)&&n.push(e)}):e.trim()&&-1===n.indexOf(e)&&n.push(e)}),{keywords:n.sort(function(e,t){return t.length-e.length}),length:n.length}}},{key:"isNumeric",value:function(e){return Number(parseFloat(e))==e}},{key:"checkRanges",value:function(e){var t=this;if(!Array.isArray(e)||"[object Object]"!==Object.prototype.toString.call(e[0]))return this.log("markRanges() will only accept an array of objects"),this.opt.noMatch(e),[];var n=[],r=0;return e.sort(function(e,t){return e.start-t.start}).forEach(function(e){var i=t.callNoMatchOnInvalidRanges(e,r),o=i.start,a=i.end;i.valid&&(e.start=o,e.length=a-o,n.push(e),r=a)}),n}},{key:"callNoMatchOnInvalidRanges",value:function(e,t){var n=void 0,r=void 0,i=!1;return e&&void 0!==e.start?(r=(n=parseInt(e.start,10))+parseInt(e.length,10),this.isNumeric(e.start)&&this.isNumeric(e.length)&&r-t>0&&r-n>0?i=!0:(this.log("Ignoring invalid or overlapping range: "+JSON.stringify(e)),this.opt.noMatch(e))):(this.log("Ignoring invalid range: 
"+JSON.stringify(e)),this.opt.noMatch(e)),{start:n,end:r,valid:i}}},{key:"checkWhitespaceRanges",value:function(e,t,n){var r=void 0,i=!0,o=n.length,a=t-o,s=parseInt(e.start,10)-a;return(r=(s=s>o?o:s)+parseInt(e.length,10))>o&&(r=o,this.log("End range automatically set to the max value of "+o)),s<0||r-s<0||s>o||r>o?(i=!1,this.log("Invalid range: "+JSON.stringify(e)),this.opt.noMatch(e)):""===n.substring(s,r).replace(/\s+/g,"")&&(i=!1,this.log("Skipping whitespace only range: "+JSON.stringify(e)),this.opt.noMatch(e)),{start:s,end:r,valid:i}}},{key:"getTextNodes",value:function(e){var t=this,n="",r=[];this.iterator.forEachNode(NodeFilter.SHOW_TEXT,function(e){r.push({start:n.length,end:(n+=e.textContent).length,node:e})},function(e){return t.matchesExclude(e.parentNode)?NodeFilter.FILTER_REJECT:NodeFilter.FILTER_ACCEPT},function(){e({value:n,nodes:r})})}},{key:"matchesExclude",value:function(e){return i.matches(e,this.opt.exclude.concat(["script","style","title","head","html"]))}},{key:"wrapRangeInTextNode",value:function(e,t,n){var r=this.opt.element?this.opt.element:"mark",i=e.splitText(t),o=i.splitText(n-t),a=document.createElement(r);return a.setAttribute("data-markjs","true"),this.opt.className&&a.setAttribute("class",this.opt.className),a.textContent=i.textContent,i.parentNode.replaceChild(a,i),o}},{key:"wrapRangeInMappedTextNode",value:function(e,t,n,r,i){var o=this;e.nodes.every(function(a,s){var c=e.nodes[s+1];if(void 0===c||c.start>t){if(!r(a.node))return!1;var u=t-a.start,l=(n>a.end?a.end:n)-a.start,h=e.value.substr(0,a.start),f=e.value.substr(l+a.start);if(a.node=o.wrapRangeInTextNode(a.node,u,l),e.value=h+f,e.nodes.forEach(function(t,n){n>=s&&(e.nodes[n].start>0&&n!==s&&(e.nodes[n].start-=l),e.nodes[n].end-=l)}),n-=l,i(a.node.previousSibling,a.start),!(n>a.end))return!1;t=a.end}return!0})}},{key:"wrapGroups",value:function(e,t,n,r){return r((e=this.wrapRangeInTextNode(e,t,t+n)).previousSibling),e}},{key:"separateGroups",value:function(e,t,n,r,i){for(var o=t.length,a=1;a-1&&r(t[a],e)&&(e=this.wrapGroups(e,s,t[a].length,i))}return e}},{key:"wrapMatches",value:function(e,t,n,r,i){var o=this,a=0===t?0:t+1;this.getTextNodes(function(t){t.nodes.forEach(function(t){t=t.node;for(var i=void 0;null!==(i=e.exec(t.textContent))&&""!==i[a];){if(o.opt.separateGroups)t=o.separateGroups(t,i,a,n,r);else{if(!n(i[a],t))continue;var s=i.index;if(0!==a)for(var c=1;c + + + + + Rust SIMD Performance Guide + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + + + + +
+
+

Introduction

+

What is SIMD

+ +

History of SIMD in Rust

+ +

Discover packed_simd

+ +

Writing fast and portable SIMD algorithms using packed_simd is, unfortunately, +not trivial. There are many pitfalls that one should be aware of, and some idioms +that help avoid those pitfalls.

+

This book attempts to document these best practices, and provides practical examples +of how to apply these tips to your code.

+

Floating-point math

+

This chapter contains information pertaining to working with floating-point numbers.

+

Short Vector Math Library

+ +

Approximate functions

+ +

Fused Multiply Add

+ +
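A fused multiply-add computes a * b + c in one instruction, with a single rounding step. As a sketch of what this looks like with vector types (assuming packed_simd's mul_add method and that the fma target feature is enabled):

use packed_simd::f32x4;

// Evaluate x*x + x + 1 lane-wise. mul_add(y, z) computes self * y + z,
// which the backend can lower to one FMA instruction per vector when
// the `fma` target feature is available.
fn poly(x: f32x4) -> f32x4 {
    x.mul_add(x, x) + f32x4::splat(1.)
}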

Enabling target features

+

Not all processors of a certain architecture will have SIMD processing units, +and using a SIMD instruction which is not supported will trigger undefined behavior.

+

To allow building safe, portable programs, the Rust compiler will not, by default, +generate any sort of vector instructions, unless it can statically determine +they are supported. For example, on AMD64, SSE2 support is architecturally guaranteed. +The x86_64-apple-darwin target enables up to SSSE3. To get a definitive list of +which features are enabled by default on various platforms, refer to the target +specifications in the compiler's source code.

+
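One way to cross-check which features a target enables by default is to ask the compiler for its configuration. The exact output depends on your compiler version, but it includes one target_feature entry per enabled feature (output abridged):

$ rustc --print cfg --target=x86_64-apple-darwin
...
target_feature="sse2"
target_feature="ssse3"
...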

Using RUSTFLAGS

+

One of the easiest ways to benefit from SIMD is to allow the compiler +to generate code using certain vector instruction extensions.

+

The environment variable RUSTFLAGS can be used to pass options for code +generation to the Rust compiler. These flags will affect all compiled crates.

+
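For example, to build a whole project with AVX2 and FMA instructions enabled (adjust the feature list to your needs):

$ RUSTFLAGS='-C target-feature=+avx2,+fma' cargo build --release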

There are two flags which can be used to enable specific vector extensions:

+

target-feature

+
    +
  • +

    Syntax: -C target-feature=<features>

    +
  • +
  • +

    Provides the compiler with a comma-separated set of instruction extensions +to enable.

    +

    Example: Use -C target-feature=+sse3,+avx to enable generating instructions +for Streaming SIMD Extensions 3 and +Advanced Vector Extensions.

    +
  • +
  • +

    To list target triples for all targets supported by Rust, use:

    +
    rustc --print target-list
    +
    +
  • +
  • +

    To list all supported target features for a certain target triple, use:

    +
    rustc --target=${TRIPLE} --print target-features
    +
    +
  • +
  • +

    Note that all CPU features are independent, and will have to be enabled individually.

    +

    Example: Setting -C target-feature=+avx2 will not enable fma, even though +all CPUs which support AVX2 also support FMA. To enable both, one has to use +-C target-feature=+avx2,+fma.

    +
  • +
  • +

    Some features also depend on other features, which need to be enabled for the +target instructions to be generated.

    +

    Example: Unless v7 is specified as the target CPU (see below), to enable +NEON on ARM it is necessary to use -C target-feature=+v7,+neon.

    +
  • +
+

target-cpu

+
    +
  • +

    Syntax: -C target-cpu=<cpu>

    +
  • +
  • +

    Sets the identifier of a CPU family / model for which to build and optimize the code.

    +

    Example: RUSTFLAGS='-C target-cpu=cortex-a75'

    +
  • +
  • +

    To list all supported target CPUs for a certain target triple, use:

    +
    rustc --target=${TRIPLE} --print target-cpus
    +
    +

    Example:

    +
    rustc --target=i686-pc-windows-msvc --print target-cpus
    +
    +
  • +
  • +

    The compiler will translate this into a list of target features. Therefore, +individual feature checks (#[cfg(target_feature = "...")]) will still +work properly.

    +
  • +
  • +

    It will cause the code generator to optimize the generated code for that +specific CPU model.

    +
  • +
  • +

    Using native as the CPU model will cause Rust to generate and optimize code +for the CPU running the compiler. It is useful when building programs which you +plan to only use locally. This should never be used when the generated programs +are meant to be run on other computers, such as when packaging for distribution +or cross-compiling.

    +
  • +
+

The target_feature attribute

+ +
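As a sketch of how this attribute is typically used (note that such functions must be unsafe, since calling them on a CPU that lacks the feature is undefined behavior):

#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[target_feature(enable = "avx2")]
unsafe fn sum_avx2(x: &[f32]) -> f32 {
    // Inside this function the compiler may freely emit AVX2
    // instructions, regardless of the crate-wide target features.
    x.iter().sum()
}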

Inlining

+ +
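One pitfall worth keeping in mind: a small SIMD helper that is not inlined pays function-call overhead on every vector operation. A sketch (dot4 is a hypothetical helper, not part of any API):

use packed_simd::f32x4;

// Without #[inline], a tiny kernel like this may not be inlined across
// crate boundaries, turning a couple of vector instructions into a call.
#[inline]
fn dot4(a: f32x4, b: f32x4) -> f32 {
    (a * b).sum()
}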

Detecting host features at runtime

+ +
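On x86 and x86_64, the standard library's is_x86_feature_detected! macro can be combined with the target_feature attribute to pick an implementation at runtime. A minimal sketch, reusing the hypothetical sum_avx2 function from the target_feature section above:

fn sum(x: &[f32]) -> f32 {
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    {
        if is_x86_feature_detected!("avx2") {
            // Safe to call: we just verified the host supports AVX2.
            return unsafe { sum_avx2(x) };
        }
    }
    x.iter().sum() // scalar fallback
}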

Bounds checking

+

Reading and writing packed vectors to/from slices is checked by default. +Independently of the configuration options used, the safe functions:

+
    +
  • Simd<[T; N]>::from_slice_aligned(& s[..])
  • +
  • Simd<[T; N]>::write_to_slice_aligned(&mut s[..])
  • +
+

always check that:

+
    +
  • the slice is big enough to hold the vector
  • +
  • the slice is suitably aligned to perform an aligned load/store for a Simd<[T; N]> (this alignment is often much larger than that of T).
  • +
+

There are _unaligned versions that use unaligned loads and stores, as well as +unsafe _unchecked versions that do not perform any checks iff debug-assertions = false / debug = false. That is, the _unchecked methods do still assert size +and alignment in debug builds, and could also do so in release builds depending +on the configuration options.

+
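For illustration, the three flavors of a load look like this (a sketch assuming the packed_simd API; the aligned variant panics unless the slice data happens to be suitably aligned):

use packed_simd::f32x4;

fn loads(s: &[f32]) -> f32x4 {
    // Checked: panics if the slice is too short or not suitably aligned.
    let _a = f32x4::from_slice_aligned(&s[..4]);
    // Checked, but accepts any alignment.
    let _u = f32x4::from_slice_unaligned(&s[..4]);
    // Unchecked: the caller must guarantee the length (and, for the
    // aligned variant, the alignment); asserts remain in debug builds.
    unsafe { f32x4::from_slice_unaligned_unchecked(&s[..4]) }
}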

These assertions often have a significant performance impact, and you should be +aware of them.

+

Vertical and horizontal operations

+

In SIMD terminology, each vector has a certain "width" (number of lanes). +A vector processor is able to perform two kinds of operations on a vector:

+
    +
  • Vertical operations: +operate on two vectors of the same width, result has same width
  • +
+

Example: vertical addition of two f32x4 vectors

+
  %0     == | 2 | -3.5 |  0 | 7 |
+              +     +     +   +
+  %1     == | 4 |  1.5 | -1 | 0 |
+              =     =     =   =
+%0 + %1  == | 6 |  -2  | -1 | 7 |
+
+
    +
  • Horizontal operations: +reduce the elements of two vectors in some way, +the result's elements combine information from the two original ones
  • +
+

Example: horizontal addition of two u64x2 vectors

+
  %0     == | 1 |  3 |
+              └─+───┘
+                └───────┐
+                        │
+  %1     == | 4 | -1 |  │
+              └─+──┘    │
+                └───┐   │
+                    │   │
+              ┌─────│───┘
+              ▼     ▼
+%0 + %1  == | 4 |   3 |
+
+
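The same distinction, written with packed_simd types (a sketch):

use packed_simd::f32x4;

fn main() {
    let a = f32x4::new(2., -3.5, 0., 7.);
    let b = f32x4::new(4., 1.5, -1., 0.);
    let v = a + b;   // vertical: lane-wise addition, the result is a vector
    let h = a.sum(); // horizontal: reduces the four lanes to a single f32
    println!("{:?} {}", v, h);
}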

Performance consideration of horizontal operations

+

The result of vertical operations, like vector negation: -a, for a given lane, +does not depend on the result of the operation for the other lanes. The result +of horizontal operations, like the vector sum reduction: a.sum(), depends on +the value of all vector lanes.

+

In virtually all architectures vertical operations are fast, while horizontal +operations are, by comparison, very slow.

+

Consider the following two functions for computing the sum of all f32 values +in a slice:

+

+#![allow(unused)]
+use packed_simd::f32x4; // import the vector type used by both functions below
+fn main() {
+fn fast_sum(x: &[f32]) -> f32 {
+    assert!(x.len() % 4 == 0);
+    let mut sum = f32x4::splat(0.); // [0., 0., 0., 0.]
+    for i in (0..x.len()).step_by(4) {
+        sum += f32x4::from_slice_unaligned(&x[i..]);
+    }
+    sum.sum()
+}
+
+fn slow_sum(x: &[f32]) -> f32 {
+    assert!(x.len() % 4 == 0);
+    let mut sum: f32 = 0.;
+    for i in (0..x.len()).step_by(4) {
+        sum += f32x4::from_slice_unaligned(&x[i..]).sum();
+    }
+    sum
+}
+}
+
+

The inner loop over the slice is where the bulk of the work actually happens. +There, the fast_sum function performs vertical operations on an accumulator vector, doing +a single horizontal reduction at the end, while the slow_sum function performs +horizontal vector operations inside the loop.

+

On all widely-used architectures, fast_sum is a large constant factor faster +than slow_sum. You can run the slice_sum example and see for yourself. On +the particular machine tested, the algorithm using horizontal vector +addition is 2.7x slower than the one using vertical vector operations!

+

Performance profiling

+

While the rest of the book provides practical advice on how to improve the performance +of SIMD code, this chapter is dedicated to performance profiling. +Profiling consists of recording a program's execution in order to identify program +hotspots.

+

Important: most profilers require debug information in order to accurately +link the program hotspots back to the corresponding source code lines. Rust will +disable debug info generation by default for optimized builds, but you can change +that in your Cargo.toml.

+
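For example, the following Cargo.toml snippet keeps full optimizations while emitting debug info for release builds:

[profile.release]
debug = true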

Performance profiling on Linux

+

Using perf

+

perf is the most powerful performance profiler +for Linux, featuring support for various hardware Performance Monitoring Units, +as well as integration with the kernel's performance events framework.

+

We will only look at how the perf command can be used to profile SIMD code. +Full system profiling is outside the scope of this book.

+

Recording

+

The first step is to record a program's execution during an average workload. +It helps if you can isolate the parts of your program which have performance +issues, and set up a benchmark which can be easily (re)run.

+

Build the benchmark binary in release mode, after having enabled debug info:

+
$ cargo build --release
+Finished release [optimized + debuginfo] target(s) in 0.02s
+
+

Then use the perf record subcommand:

+
$ perf record --call-graph=dwarf ./target/release/my-program
+[ perf record: Woken up 10 times to write data ]
+[ perf record: Captured and wrote 2,356 MB perf.data (292 samples) ]
+
+

Instead of using --call-graph=dwarf, which can become pretty slow, you can use +--call-graph=lbr if you have a processor with support for Last Branch Record +(i.e. Intel Haswell and newer).

+

perf will, by default, record the count of CPU cycles it takes to execute +various parts of your program. You can use the -e command line option +to enable other performance events, such as cache-misses. Use perf list +to get a list of all hardware counters supported by your CPU.

+
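For example, to record cache misses alongside cycles (a hypothetical invocation; my-program is a placeholder):

$ perf record -e cycles,cache-misses --call-graph=dwarf ./target/release/my-program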

Viewing the report

+

The next step is getting a bird's eye view of the program's execution. +perf provides an ncurses-based interface which will get you started.

+

Use perf report to open a visualization of your program's performance:

+
perf report --hierarchy -M intel
+
+

--hierarchy will display a tree-like structure of where your program spent +most of its time. -M intel enables disassembly output with Intel syntax, which +is subjectively more readable than the default AT&T syntax.

+

Here is the output from profiling the nbody benchmark:

+
- 100,00% nbody
+  - 94,18% nbody
+    + 93,48% [.] nbody_lib::simd::advance
+    + 0,70% [.] nbody_lib::run
+    + 5,06% libc-2.28.so
+
+

If you move with the arrow keys to any node in the tree, you can then press a +to have perf annotate that node. This means it will:

+
• disassemble the function

• associate every instruction with the percentage of time which was spent executing it

• interleave the disassembly with the source code, assuming it found the debug symbols (you can use s to toggle this behaviour)

perf will, by default, open the instruction which it identified as being the hottest spot in the function:

0,76  │ movapd xmm2,xmm0
0,38  │ movhlps xmm2,xmm0
      │ addpd  xmm2,xmm0
      │ unpcklpd xmm1,xmm2
12,50 │ sqrtpd xmm0,xmm1
1,52  │ mulpd  xmm0,xmm1

In this case, sqrtpd will be highlighted in red, since that's the instruction which the CPU spends most of its time executing.

Using Valgrind


Valgrind is a set of tools which initially helped C/C++ programmers find unsafe memory accesses in their code. Nowadays the project also has:

• a heap profiler called massif

• a cache utilization profiler called cachegrind

• a call-graph performance profiler called callgrind
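As a sketch, callgrind can profile the same benchmark binary; callgrind_annotate (shipped with Valgrind) then prints a per-function cost summary. The output file name ends in the process ID:

$ valgrind --tool=callgrind ./target/release/my-program
$ callgrind_annotate callgrind.out.<pid>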

Machine code analysis tools


The microarchitecture of modern CPUs


While you might have heard of Instruction Set Architectures, such as x86 or arm or mips, the term microarchitecture (also written here as µ-arch) refers to the internal details of an actual family of CPUs, such as Intel's Haswell or AMD's Jaguar.

Replacing scalar code with SIMD code will improve performance on all CPUs supporting the required vector extensions. However, due to microarchitectural differences, the actual speed-up at runtime might vary.

Example: a simple case arises when optimizing for AMD K8 CPUs. The assembly generated for an empty function should look like this:

nop
ret

The nop is used to align the ret instruction for better performance. However, the compiler will actually generate the following code:

repz ret

The repz instruction will repeat the following instruction until a certain condition is met. Of course, in this situation, the function will simply return immediately, and the ret instruction is still aligned. However, AMD K8's branch predictor performs better with the latter code.

If you are looking to absolutely maximize performance for a certain target µ-arch, you will have to read some CPU manuals, or ask the compiler to do it for you with -C target-cpu.
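For example, to let the compiler optimize for the CPU of the machine performing the build:

$ RUSTFLAGS='-C target-cpu=native' cargo build --release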

Summary of CPU internals


Modern processors are able to execute instructions out-of-order for better performance, by utilizing tricks such as branch prediction, instruction pipelining, or superscalar execution.

SIMD instructions are also subject to these optimizations, meaning it can get pretty difficult to determine where the slowdown happens. For example, if the profiler reports a store operation is slow, one of two things could be happening:

• the store is limited by the CPU's memory bandwidth, which is actually an ideal scenario, all things considered;

• memory bandwidth is nowhere near its peak, but the value to be stored is at the end of a long chain of operations, and this store is where the profiler encountered the pipeline stall.

Since most profilers are simple tools which don't understand the subtleties of instruction scheduling, you will often need a different kind of tool to pin down the actual cause of a slowdown.

Analyzing the machine code


Certain tools have knowledge of internal CPU microarchitecture, i.e. they know:

• how many physical register files a CPU actually has

• what the latency / throughput of an instruction is

• what µ-ops are generated for a set of instructions

and many other architectural details.


These tools are therefore able to provide accurate information as to why some instructions are inefficient, and where the bottleneck is.

The disadvantage is that the output of these tools requires advanced knowledge of the target architecture to understand, i.e. they cannot explicitly point out the cause of the issue.

Intel's Architecture Code Analyzer (IACA)


IACA is a free tool offered by Intel for analyzing the performance of various computational kernels.

Being a proprietary, closed-source tool, it only supports Intel's µ-arches.

llvm-mca
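llvm-mca is LLVM's machine code analyzer; it supports the µ-arches that LLVM itself can target. A minimal sketch of feeding it the assembly Rust emits (the crate name my_program and the -mcpu value here are illustrative):

$ cargo rustc --release -- --emit asm
$ llvm-mca -mcpu=haswell target/release/deps/my_program-*.s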

"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"df":0,"docs":{},"e":{"a":{"c":{"df":0,"docs":{},"h":{"df":1,"docs":{"16":{"tf":1.0}}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"9":{"tf":1.0}}}}},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"21":{"tf":1.0}}}}}}},"df":1,"docs":{"21":{"tf":1.0}},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"16":{"tf":1.4142135623730951}}}}}}}},"m":{"df":0,"docs":{},"p":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"n":{"a":{"b":{"df":0,"docs":{},"l":{"df":5,"docs":{"10":{"tf":2.6457513110645907},"21":{"tf":1.4142135623730951},"22":{"tf":1.0},"8":{"tf":1.7320508075688772},"9":{"tf":1.0}}}},"df":0,"docs":{}},"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}}}}}}},"d":{"df":2,"docs":{"17":{"tf":1.0},"26":{"tf":1.0}}},"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"g":{"df":0,"docs":{},"h":{"df":1,"docs":{"15":{"tf":1.0}}}}}},"v":{"df":0,"docs":{},"i":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"9":{"tf":1.0}}}}}}}},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"10":{"tf":1.0}},"t":{"df":2,"docs":{"20":{"tf":1.0},"21":{"tf":1.0}}}}}},"x":{"a":{"df":0,"docs":{},"m":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":8,"docs":{"10":{"tf":1.7320508075688772},"11":{"tf":1.4142135623730951},"16":{"tf":1.4142135623730951},"17":{"tf":1.0},"25":{"tf":1.4142135623730951},"26":{"tf":1.0},"3":{"tf":1.0},"8":{"tf":1.0}}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":4,"docs":{"18":{"tf":1.0},"21":{"tf":1.4142135623730951},"22":{"tf":1.7320508075688772},"26":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}},"p":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"27":{"tf":1.0}}}}}}},"df":0,"docs":{}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"s":{"df":3,"docs":{"10":{"tf":1.7320508075688772},"25":{"tf":1.0},"9":{"tf":1.4142135623730951}}}}}}},"y":{"df":1,"docs":{"22":{"tf":1.0}}}},"f":{"3":{"2":{"df":1,"docs":{"17":{"tf":2.449489742783178}},"x":{"4":{":":{":":{"df":0,"docs":{},"f":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"e":{"_":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"d":{"(":{"&":{"df":0,"docs":{},"x":{"[":{"df":0,"docs":{},"i":{".":{".":{"]":{")":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{}}}}},"s":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"t":{"(":{"0":{"df":1,"docs":{"17":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}},"a":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"do
cs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"df":0,"docs":{},"l":{"df":0,"docs":{},"s":{"df":1,"docs":{"15":{"tf":1.4142135623730951}}}},"m":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":2,"docs":{"11":{"tf":1.0},"25":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"(":{"df":0,"docs":{},"x":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}}},"df":2,"docs":{"17":{"tf":1.0},"3":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}}}},"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":5,"docs":{"10":{"tf":2.449489742783178},"11":{"tf":1.4142135623730951},"14":{"tf":1.0},"20":{"tf":1.0},"8":{"tf":1.4142135623730951}},"e":{"=":{"+":{"df":0,"docs":{},"v":{"7":{",":{"+":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"10":{"tf":1.0}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}},"<":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"10":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"df":0,"docs":{},"s":{"=":{"+":{"a":{"df":0,"docs":{},"v":{"df":0,"docs":{},"x":{"2":{",":{"+":{"df":0,"docs":{},"f":{"df":0,"docs":{},"m":{"a":{"df":1,"docs":{"10":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"df":1,"docs":{"10":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"3":{",":{"+":{"a":{"df":0,"docs":{},"v":{"df":0,"docs":{},"x":{"df":1,"docs":{"10":{"tf":1.0}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}}}}},"df":0,"docs":{}},"i":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":1,"docs":{"27":{"tf":1.0}}}},"n":{"d":{"df":1,"docs":{"23":{"tf":1.0}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"s":{"df":0,"docs":{},"h":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"l":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"9":{"tf":1.4142135623730951}}}},"df":0,"docs":{},"o":{"a":{"df":0,"docs":{},"t":{"df":1,"docs":{"4":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}},"m":{"a":{"df":1,"docs":{"10":{"tf":1.4142135623730951}}},"df":0,"docs":{}},"n":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}},"o":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":2,"docs":{"17":{"tf":1.0},"25":{"tf":1.4142135623730951}}}}}},"u":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}}},"r":{"a":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"df":0,"docs":{},"w":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"k":{"df":1,"docs":{"20":{"tf":1.0}}}}}}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"e":{"df":1,"docs":{"28":{"tf":1.0}}}}},"u":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":1,"docs":{"20":{"tf":1.0}}}},"n":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":5,"docs":{"15":{"tf":1.0},"17":{"tf":1.7320508075688772},"22":{"tf":1.4142135623730951},"25":{"tf":1.4142135623730951},"6":{"tf":1.0}}}}}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"e":{"df":1,"docs":{"7":{"tf":1.0}}}}}},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":7,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":2.0},"18":{"tf":1.0},"25":{"tf":1.41421356237
30951},"27":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.4142135623730951}}}}},"t":{"df":1,"docs":{"22":{"tf":1.0}}}},"i":{"df":0,"docs":{},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"r":{"a":{"df":0,"docs":{},"p":{"df":0,"docs":{},"h":{"=":{"d":{"df":0,"docs":{},"w":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"f":{"df":1,"docs":{"21":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}},"df":0,"docs":{},"l":{"b":{"df":0,"docs":{},"r":{"df":1,"docs":{"21":{"tf":1.0}}}},"df":0,"docs":{}}},"df":1,"docs":{"23":{"tf":1.0}}}}},"df":0,"docs":{}},"u":{"a":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":1,"docs":{"8":{"tf":1.0}}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}},"h":{"a":{"df":0,"docs":{},"p":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":2,"docs":{"17":{"tf":1.0},"26":{"tf":1.4142135623730951}}}}}},"r":{"d":{"df":0,"docs":{},"w":{"a":{"df":0,"docs":{},"r":{"df":2,"docs":{"20":{"tf":1.0},"21":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"w":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"df":2,"docs":{"21":{"tf":1.0},"25":{"tf":1.0}}}}}},"v":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}}}}},"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"p":{"df":1,"docs":{"23":{"tf":1.0}}},"r":{"d":{"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"l":{"df":0,"docs":{},"p":{"df":3,"docs":{"21":{"tf":1.0},"23":{"tf":1.0},"3":{"tf":1.0}}}},"r":{"df":0,"docs":{},"e":{"df":2,"docs":{"22":{"tf":1.0},"25":{"tf":1.0}}}}},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"r":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}},"g":{"df":0,"docs":{},"h":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"h":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}}}}}},"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"2":{"tf":1.0}}}}}}}},"o":{"df":0,"docs":{},"l":{"d":{"df":1,"docs":{"15":{"tf":1.0}}},"df":0,"docs":{}},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"z":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":2,"docs":{"16":{"tf":1.7320508075688772},"17":{"tf":2.449489742783178}}}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"14":{"tf":1.0}}}},"t":{"df":0,"docs":{},"s":{"df":0,"docs":{},"p":{"df":0,"docs":{},"o":{"df":0,"docs":{},"t":{"df":1,"docs":{"18":{"tf":1.4142135623730951}}}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}}}}}},"i":{".":{"df":2,"docs":{"21":{"tf":1.0},"27":{"tf":1.4142135623730951}}},"a":{"c":{"a":{"df":1,"docs":{"28":{"tf":1.4142135623730951}}},"df":0,"docs":{}},"df":0,"docs":{}},"d":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":3,"docs":{"11":{"tf":1.0},"18":{"tf":1.0},"22":{"tf":1.0}}}}}}}},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":1,"docs":{"3":{"tf":1.0}}}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":1,"docs":{"15":{"tf":1.0}}}},"m":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"d":{"df":0,"docs":{},"i":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}},"p":{"a":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{},"o":{"df":0,"docs":{},
"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"18":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"v":{"df":2,"docs":{"18":{"tf":1.0},"25":{"tf":1.0}}}}}}},"n":{"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"d":{"df":2,"docs":{"10":{"tf":1.0},"15":{"tf":1.0}}},"df":0,"docs":{}}}}},"i":{"df":0,"docs":{},"v":{"df":0,"docs":{},"i":{"d":{"df":0,"docs":{},"u":{"df":2,"docs":{"10":{"tf":1.0},"11":{"tf":1.0}}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"i":{"df":1,"docs":{"27":{"tf":1.0}}}},"df":0,"docs":{}}}}},"f":{"df":0,"docs":{},"o":{"df":2,"docs":{"18":{"tf":1.0},"21":{"tf":1.0}},"r":{"df":0,"docs":{},"m":{"df":4,"docs":{"16":{"tf":1.0},"18":{"tf":1.0},"27":{"tf":1.0},"4":{"tf":1.0}}}}}},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":1,"docs":{"23":{"tf":1.0}}}}},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"13":{"tf":1.0}}}}},"n":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"i":{"d":{"df":1,"docs":{"17":{"tf":1.0}}},"df":0,"docs":{}},"t":{"df":0,"docs":{},"e":{"a":{"d":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"r":{"df":0,"docs":{},"u":{"c":{"df":0,"docs":{},"t":{"df":7,"docs":{"10":{"tf":1.7320508075688772},"22":{"tf":1.7320508075688772},"25":{"tf":2.23606797749979},"26":{"tf":2.0},"27":{"tf":1.7320508075688772},"8":{"tf":1.4142135623730951},"9":{"tf":1.0}}}},"df":0,"docs":{}}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"g":{"df":0,"docs":{},"r":{"df":1,"docs":{"20":{"tf":1.0}}}},"l":{"'":{"df":2,"docs":{"25":{"tf":1.0},"28":{"tf":1.4142135623730951}}},"df":3,"docs":{"21":{"tf":1.0},"22":{"tf":1.7320508075688772},"28":{"tf":1.0}}},"r":{"df":0,"docs":{},"f":{"a":{"c":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"l":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"v":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}}},"n":{"df":3,"docs":{"25":{"tf":1.0},"26":{"tf":1.0},"27":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"o":{"d":{"df":0,"docs":{},"u":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"0":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"s":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":1,"docs":{"21":{"tf":1.0}}}},"s":{"df":0,"docs":{},"u":{"df":2,"docs":{"21":{"tf":1.0},"27":{"tf":1.0}}}}}},"j":{"a":{"df":0,"docs":{},"g":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"k":{"8":{"'":{"df":1,"docs":{"25":{"tf":1.0}}},"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"'":{"df":1,"docs":{"20":{"tf":1.0}}},"df":1,"docs":{"28":{"tf":1.0}}}}}},"y":{"df":1,"docs":{"22":{"tf":1.0}}}},"i":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}}},"n":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":1,"docs":{"27":{"tf":1.0}},"l":{"df":0,"docs":{},"e":{"d":{"df":0,"docs":{},"g":{"df":1,"docs":{"27":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}}}}}},"l":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"df":2,"docs":{"16":{"tf":1.0},"17":{"tf":1.7320508075688772}}}},"r":{"df":0,"docs":{},"g":{"df":1,"docs":{"17":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"15":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"21":{"tf":1.0}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"c":{"df":1,"docs":{"27":{"tf":1.0}}},"d
f":0,"docs":{}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}}}}},"df":0,"docs":{},"i":{"b":{"c":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"5":{"tf":1.0}}}}},"df":0,"docs":{}}},"df":0,"docs":{},"m":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}}}}},"n":{"df":0,"docs":{},"e":{"df":2,"docs":{"18":{"tf":1.0},"21":{"tf":1.0}}},"k":{"df":1,"docs":{"18":{"tf":1.0}}},"u":{"df":0,"docs":{},"x":{"df":2,"docs":{"19":{"tf":1.0},"20":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"t":{"df":4,"docs":{"10":{"tf":1.7320508075688772},"11":{"tf":1.4142135623730951},"21":{"tf":1.4142135623730951},"8":{"tf":1.0}}}}},"l":{"df":0,"docs":{},"v":{"df":0,"docs":{},"m":{"df":1,"docs":{"29":{"tf":1.0}}}}},"o":{"a":{"d":{"/":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"df":1,"docs":{"15":{"tf":1.0}}},"df":0,"docs":{}},"c":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"11":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{},"n":{"df":0,"docs":{},"g":{"df":1,"docs":{"26":{"tf":1.0}}}},"o":{"df":0,"docs":{},"k":{"df":2,"docs":{"20":{"tf":1.0},"25":{"tf":1.4142135623730951}}},"p":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}}},"m":{"a":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":3,"docs":{"17":{"tf":1.0},"24":{"tf":1.0},"27":{"tf":1.0}}}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"i":{"df":2,"docs":{"27":{"tf":1.4142135623730951},"3":{"tf":1.0}}},"u":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}},"s":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"f":{"df":1,"docs":{"23":{"tf":1.0}}}}}},"t":{"df":0,"docs":{},"h":{"df":2,"docs":{"4":{"tf":1.0},"5":{"tf":1.0}}}},"x":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"b":{"df":1,"docs":{"21":{"tf":1.0}}},"c":{"a":{"df":1,"docs":{"29":{"tf":1.0}}},"df":0,"docs":{}},"df":1,"docs":{"22":{"tf":1.4142135623730951}},"e":{"a":{"df":0,"docs":{},"n":{"df":2,"docs":{"22":{"tf":1.0},"26":{"tf":1.0}},"t":{"df":1,"docs":{"11":{"tf":1.0}}}}},"df":0,"docs":{},"m":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":2,"docs":{"23":{"tf":1.0},"26":{"tf":1.4142135623730951}}}}}},"t":{"df":0,"docs":{},"h":{"df":0,"docs":{},"o":{"d":{"df":1,"docs":{"15":{"tf":1.0}}},"df":0,"docs":{}}}}},"i":{"c":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"a":{"df":0,"docs":{},"r":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":2,"docs":{"25":{"tf":1.7320508075688772},"27":{"tf":1.0}}}}}},"df":0,"docs":{}}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}}},"df":0,"docs":{},"p":{"df":1,"docs":{"25":{"tf":1.0}}},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"21":{"tf":1.0}}}}},"o":{"d":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}},"l":{"df":1,"docs":{"11":{"tf":1.7320508075688772}}},"r":{"df":0,"docs":{},"n":{"df":2,"docs":{"25":{"tf":1.0},"26":{"tf":1.0}}}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"20":{"tf":1.0}}}}}}},"r":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.0}}}},"v":{"a":{"df":0,"docs":{},"p":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.0}}},"h":{"df":0,"docs":{},"l":{"df":0,"docs":{},"p":{"df
":1,"docs":{"22":{"tf":1.0}}}}}}},"s":{"df":0,"docs":{},"v":{"c":{"df":1,"docs":{"11":{"tf":1.0}}},"df":0,"docs":{}}},"u":{"c":{"df":0,"docs":{},"h":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{},"l":{"df":0,"docs":{},"p":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"7":{"tf":1.0}}}}}}}},"t":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}},"n":{"]":{">":{":":{":":{"df":0,"docs":{},"f":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"e":{"_":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{}}}}},"w":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"_":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"e":{"_":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"d":{"(":{"&":{"df":0,"docs":{},"m":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"15":{"tf":1.0}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"v":{"df":1,"docs":{"11":{"tf":1.0}}}}}},"b":{"df":0,"docs":{},"o":{"d":{"df":0,"docs":{},"i":{"df":1,"docs":{"22":{"tf":1.7320508075688772}}},"y":{"_":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"b":{":":{":":{"df":0,"docs":{},"r":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"df":1,"docs":{"22":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"d":{":":{":":{"a":{"d":{"df":0,"docs":{},"v":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}},"df":0,"docs":{}}},"df":0,"docs":{}}},"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":0,"docs":{},"s":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"df":1,"docs":{"15":{"tf":1.0}},"e":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"26":{"tf":1.0}}}},"c":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"10":{"tf":1.0}}}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"e":{"d":{"df":1,"docs":{"10":{"tf":1.0}}},"df":0,"docs":{}},"g":{"a":{"df":0,"docs":{},"t":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"10":{"tf":1.0}}}},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"11":{"tf":1.0}}}}},"w":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"21":{"tf":1.0}}}}},"x":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}},"o":{"d":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}},"df":0,"docs":{},"p":{"df":1,"docs":{"25":{"tf":1.4142135623730951}}},"t":{"df":0,"docs":{},"e":{"df":1,"docs":{"10":{"tf":1.0}}}},"w":{"a":{"d":{"a":{"df":0,"docs":{},"y":{"df":1,"docs":{"23":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{},"h":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"26":{"tf":1.0}}}}}}},"u":{"df":0,"
docs":{},"m":{"b":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"16":{"tf":1.0},"4":{"tf":1.0}}}}},"df":0,"docs":{}}}},"o":{"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"28":{"tf":1.0}}}}}},"n":{"df":6,"docs":{"10":{"tf":1.0},"16":{"tf":1.0},"17":{"tf":1.0},"26":{"tf":1.0},"3":{"tf":1.0},"9":{"tf":1.0}}},"p":{"df":1,"docs":{"27":{"tf":1.0}},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}},"r":{"df":3,"docs":{"16":{"tf":2.23606797749979},"17":{"tf":3.0},"26":{"tf":1.4142135623730951}}}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":5,"docs":{"11":{"tf":1.7320508075688772},"18":{"tf":1.0},"21":{"tf":1.0},"25":{"tf":1.0},"26":{"tf":1.0}}},"o":{"df":0,"docs":{},"n":{"df":3,"docs":{"15":{"tf":1.4142135623730951},"21":{"tf":1.0},"9":{"tf":1.0}}}}}}},"r":{"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"18":{"tf":1.4142135623730951},"26":{"tf":1.0}}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"16":{"tf":1.0}}}}}}},"u":{"df":0,"docs":{},"t":{"df":2,"docs":{"26":{"tf":1.0},"27":{"tf":1.0}},"p":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":2,"docs":{"22":{"tf":1.4142135623730951},"27":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"i":{"d":{"df":1,"docs":{"20":{"tf":1.0}}},"df":0,"docs":{}}}}},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"p":{"a":{"c":{"df":0,"docs":{},"k":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"11":{"tf":1.0}}}},"df":1,"docs":{"15":{"tf":1.0}},"e":{"d":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"d":{"df":1,"docs":{"3":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}},"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"21":{"tf":1.4142135623730951}},"i":{"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"9":{"tf":1.0}}}}},"c":{"df":1,"docs":{"11":{"tf":1.0}}},"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"k":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{},"r":{"c":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"f":{".":{"d":{"a":{"df":0,"docs":{},"t":{"a":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":3,"docs":{"20":{"tf":1.7320508075688772},"21":{"tf":2.449489742783178},"22":{"tf":2.23606797749979}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"m":{"df":12,"docs":{"15":{"tf":1.7320508075688772},"16":{"tf":1.0},"17":{"tf":1.7320508075688772},"18":{"tf":1.7320508075688772},"19":{"tf":1.0},"20":{"tf":1.7320508075688772},"21":{"tf":1.4142135623730951},"22":{"tf":1.0},"23":{"tf":1.0},"25":{"tf":2.0},"26":{"tf":1.0},"28":{"tf":1.0}}}}}},"t":{"a":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"4":{"tf":1.0}}}}},"df":0,"docs":{}}}},"h":{"df":0,"docs":{},"y":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"c":{"df":1,"docs":{"27":{"tf":1.0}}},"df":0,"docs":{}}}}},"i":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"26":{"tf":1.4142135623730951}}}}}}},"t":{"df":0,"docs":{},"f":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"3":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}}},"l":{"a":{"df":0,"docs":{},"n":{"df":1,"do
cs":{"11":{"tf":1.0}}},"t":{"df":0,"docs":{},"f":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"m":{"df":1,"docs":{"8":{"tf":1.0}}}}}}}},"df":0,"docs":{}},"o":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":2,"docs":{"27":{"tf":1.0},"4":{"tf":1.4142135623730951}}}}},"r":{"df":0,"docs":{},"t":{"a":{"b":{"df":0,"docs":{},"l":{"df":2,"docs":{"3":{"tf":1.0},"8":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"w":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"20":{"tf":1.0}}}}}},"r":{"a":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"c":{"df":2,"docs":{"18":{"tf":1.0},"3":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"df":0,"docs":{},"e":{"d":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"df":0,"docs":{}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"22":{"tf":1.0}}}},"t":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":2,"docs":{"21":{"tf":1.0},"26":{"tf":1.0}}}}}},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":2,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":1.4142135623730951}}}}},"o":{"c":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"8":{"tf":1.0}},"o":{"df":0,"docs":{},"r":{"df":4,"docs":{"16":{"tf":1.0},"21":{"tf":1.0},"26":{"tf":1.0},"8":{"tf":1.0}}}}}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":6,"docs":{"18":{"tf":2.0},"19":{"tf":1.0},"20":{"tf":1.7320508075688772},"22":{"tf":1.0},"23":{"tf":1.7320508075688772},"26":{"tf":1.7320508075688772}}}}},"g":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"m":{"'":{"df":3,"docs":{"18":{"tf":1.0},"21":{"tf":1.0},"22":{"tf":1.4142135623730951}}},"df":5,"docs":{"11":{"tf":1.4142135623730951},"18":{"tf":1.4142135623730951},"21":{"tf":1.7320508075688772},"22":{"tf":1.0},"8":{"tf":1.0}},"m":{"df":1,"docs":{"23":{"tf":1.0}}}}},"df":0,"docs":{}}},"j":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"23":{"tf":1.0}}}},"df":0,"docs":{}}},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"11":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"28":{"tf":1.0}}}}},"df":0,"docs":{}}}}}},"v":{"df":0,"docs":{},"i":{"d":{"df":5,"docs":{"10":{"tf":1.0},"18":{"tf":1.0},"22":{"tf":1.0},"27":{"tf":1.0},"3":{"tf":1.0}}},"df":0,"docs":{}}}}}},"r":{"df":0,"docs":{},"e":{")":{"df":0,"docs":{},"r":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"a":{"d":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}},"df":2,"docs":{"15":{"tf":1.0},"25":{"tf":1.0}}},"df":0,"docs":{}},"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"d":{"df":2,"docs":{"18":{"tf":1.0},"21":{"tf":2.8284271247461903}}},"df":0,"docs":{}}}},"d":{"df":1,"docs":{"22":{"tf":1.0}},"u":{"c":{"df":1,"docs":{"16":{"tf":1.0}},"t":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"25":{"tf":1.0},"8":{"tf":1.0}}}}},"g":{"df":0,"docs":{},"i":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"27":{"tf":1.0}}}}}},"l":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"s":{"df":2,"docs":{"15":{"tf":1.0},"21":{"tf":1.7320508075688772}}}},"df":0,"docs":{}}},"p":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":1,"d
ocs":{"25":{"tf":1.0}}}},"df":0,"docs":{}},"l":{"a":{"c":{"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":2,"docs":{"22":{"tf":1.7320508075688772},"26":{"tf":1.0}}}}},"z":{"df":1,"docs":{"25":{"tf":1.4142135623730951}}}},"q":{"df":0,"docs":{},"u":{"df":0,"docs":{},"i":{"df":0,"docs":{},"r":{"df":3,"docs":{"18":{"tf":1.0},"25":{"tf":1.0},"27":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"18":{"tf":1.0}}},"u":{"df":0,"docs":{},"l":{"df":0,"docs":{},"t":{"'":{"df":1,"docs":{"16":{"tf":1.0}}},"df":2,"docs":{"16":{"tf":1.0},"17":{"tf":1.7320508075688772}}}}}},"t":{"df":1,"docs":{"25":{"tf":2.0}},"u":{"df":0,"docs":{},"r":{"df":0,"docs":{},"n":{"df":1,"docs":{"25":{"tf":1.0}}}}}}},"u":{"df":0,"docs":{},"n":{"df":2,"docs":{"11":{"tf":1.4142135623730951},"17":{"tf":1.0}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":2,"docs":{"14":{"tf":1.0},"25":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"c":{"df":2,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":1.4142135623730951}}},"df":6,"docs":{"10":{"tf":1.0},"11":{"tf":1.0},"18":{"tf":1.0},"2":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.0}},"f":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"g":{"df":2,"docs":{"11":{"tf":1.0},"9":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}}}}}},"s":{"a":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"df":2,"docs":{"15":{"tf":1.0},"8":{"tf":1.0}}}},"m":{"df":0,"docs":{},"e":{"df":1,"docs":{"16":{"tf":1.4142135623730951}}},"p":{"df":0,"docs":{},"l":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"c":{"a":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":1,"docs":{"26":{"tf":1.0}}}}}},"df":0,"docs":{}}},"h":{"df":0,"docs":{},"e":{"d":{"df":0,"docs":{},"u":{"df":0,"docs":{},"l":{"df":1,"docs":{"26":{"tf":1.0}}}}},"df":0,"docs":{}}},"o":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":1,"docs":{"20":{"tf":1.0}}}}}},"df":2,"docs":{"15":{"tf":1.4142135623730951},"22":{"tf":1.0}},"e":{"df":0,"docs":{},"e":{"df":2,"docs":{"10":{"tf":1.0},"17":{"tf":1.0}}},"p":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"10":{"tf":1.0}}}},"df":0,"docs":{}},"t":{"df":6,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":1.0},"21":{"tf":1.0},"23":{"tf":1.0},"25":{"tf":1.0},"27":{"tf":1.0}}}},"h":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"5":{"tf":1.0}}}}}},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":0,"docs":{},"i":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"c":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}}}},"m":{"d":{"<":{"[":{"df":0,"docs":{},"t":{"df":1,"docs":{"15":{"tf":1.7320508075688772}}}},"df":0,"docs":{}},"df":11,"docs":{"1":{"tf":1.0},"10":{"tf":1.0},"16":{"tf":1.0},"18":{"tf":1.0},"2":{"tf":1.0},"20":{"tf":1.0},"25":{"tf":1.0},"26":{"tf":1.0},"3":{"tf":1.0},"8":{"tf":1.4142135623730951},"9":{"tf":1.0}}},"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":2,"docs":{"25":{"tf":1.0},"26":{"tf":1.0}},"i":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"n":{"df":0,"docs":{},"g":{"df":0,"docs":{},"l":{"df":1,"docs":{"17":{"tf":1.0}}}}},"t":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"t":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}},"z":{"df":0,"docs":{},"e":{"df":1,"docs":{"15":{"tf":1.0}}}}},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"d
ocs":{},"e":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"df":2,"docs":{"15":{"tf":1.7320508075688772},"17":{"tf":1.4142135623730951}}}},"df":0,"docs":{}},"o":{"df":0,"docs":{},"w":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"(":{"df":0,"docs":{},"x":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}}},"d":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":0,"docs":{},"n":{"df":1,"docs":{"26":{"tf":1.0}}}}}},"df":3,"docs":{"17":{"tf":1.0},"21":{"tf":1.0},"26":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}}}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"8":{"tf":1.0}}}},"u":{"df":0,"docs":{},"r":{"c":{"df":4,"docs":{"18":{"tf":1.0},"22":{"tf":1.0},"28":{"tf":1.0},"8":{"tf":1.0}}},"df":0,"docs":{}}}},"p":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"i":{"df":0,"docs":{},"f":{"df":3,"docs":{"11":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.0}},"i":{"df":1,"docs":{"10":{"tf":1.0}}}}}},"df":0,"docs":{},"e":{"d":{"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{}},"n":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}}},"o":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}},"q":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"p":{"d":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}}},"s":{"df":0,"docs":{},"e":{"2":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"e":{"3":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}}}},"t":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":1,"docs":{"26":{"tf":1.0}}}},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}},"t":{"df":0,"docs":{},"i":{"c":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"p":{"df":2,"docs":{"21":{"tf":1.0},"22":{"tf":1.0}}}},"i":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":3,"docs":{"11":{"tf":1.0},"15":{"tf":1.0},"25":{"tf":1.0}}}}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"e":{"df":2,"docs":{"15":{"tf":1.0},"26":{"tf":2.0}}}}},"r":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"m":{"df":1,"docs":{"10":{"tf":1.0}}}},"df":0,"docs":{}},"u":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"u":{"b":{"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"a":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"j":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":2,"docs":{"22":{"tf":1.0},"26":{"tf":1.0}}}},"df":0,"docs":{}}},"t":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":1,"docs":{"26":{"tf":1.0}}}}}}}},"c":{"df":0,"docs":{},"h":{"df":4,"docs":{"11":{"tf":1.0},"21":{"tf":1.0},"25":{"tf":1.4142135623730951},"26":{"tf":1.0}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"m":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"df":1,"docs":{"17":{"tf":2.6457513110645907}},"m":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"26":{"tf":1.0}}}}},"df":0,"docs":{}}},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"s":{"c":{"a":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"26":{"tf":1
.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{}}}},"p":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":7,"docs":{"10":{"tf":2.0},"11":{"tf":1.0},"20":{"tf":1.0},"21":{"tf":1.4142135623730951},"25":{"tf":1.0},"28":{"tf":1.0},"8":{"tf":1.7320508075688772}}}}}}}},"y":{"df":0,"docs":{},"m":{"b":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.0}}}}},"df":0,"docs":{}},"n":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"x":{"df":3,"docs":{"10":{"tf":1.0},"11":{"tf":1.0},"22":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}},"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"df":1,"docs":{"20":{"tf":1.0}}}}}}}},"t":{"a":{"df":0,"docs":{},"k":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}}}},"r":{"df":0,"docs":{},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"(":{"df":1,"docs":{"21":{"tf":1.0}}},"/":{"df":0,"docs":{},"r":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"/":{"df":0,"docs":{},"m":{"df":0,"docs":{},"i":{"df":1,"docs":{"21":{"tf":1.0}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}}}},"=":{"$":{"df":0,"docs":{},"{":{"df":0,"docs":{},"t":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":2,"docs":{"10":{"tf":1.0},"11":{"tf":1.0}}}}}}}}},"df":0,"docs":{},"i":{"6":{"8":{"6":{"df":1,"docs":{"11":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}},"_":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"12":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"df":5,"docs":{"10":{"tf":3.7416573867739413},"11":{"tf":2.8284271247461903},"25":{"tf":1.4142135623730951},"27":{"tf":1.0},"8":{"tf":1.7320508075688772}}}}}}},"df":1,"docs":{"15":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"m":{"df":1,"docs":{"25":{"tf":1.0}},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"g":{"df":1,"docs":{"16":{"tf":1.0}}}}}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"17":{"tf":1.0}}}}},"h":{"a":{"df":0,"docs":{},"t":{"'":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"e":{"df":0,"docs":{},"f":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":2,"docs":{"11":{"tf":1.0},"27":{"tf":1.0}}}}}}}},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"g":{"df":1,"docs":{"26":{"tf":1.4142135623730951}}}}},"o":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"df":2,"docs":{"25":{"tf":1.0},"3":{"tf":1.0}}}},"u":{"df":0,"docs":{},"g":{"df":0,"docs":{},"h":{"df":1,"docs":{"10":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"g":{"df":0,"docs":{},"h":{"df":0,"docs":{},"t":{"df":0,"docs":{},"p":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"27":{"tf":1.0}}}}}}}}}}}},"i":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"df":2,"docs":{"21":{"tf":1.0},"22":{"tf":1.7320508075688772}}}},"p":{"df":1,"docs":{"3":{"tf":1.0}}}},"o":{"/":{"df":0,"docs":{},"f":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"df":0,"docs":{},"g":{"df":0,"docs":{},"g":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.0}}}}},"o":{"df":0,"docs":{},"l":{"df":5,"docs":{"23":{"tf":1.0},"24":{"tf":1.0},"26":{"tf":1.0},"27":{"tf":1.7320508075688772},"28":{"tf":1.4142135623730951}}}}},"r":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"s":{"df":0,"docs":{},"l":{"a":{"df":0,
"docs":{},"t":{"df":1,"docs":{"11":{"tf":1.0}}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}},"i":{"c":{"df":0,"docs":{},"k":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{},"g":{"df":0,"docs":{},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"8":{"tf":1.0}}}}}},"p":{"df":0,"docs":{},"l":{"df":2,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":1.0}}}},"v":{"df":0,"docs":{},"i":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"3":{"tf":1.0}}}},"df":0,"docs":{}}}}},"w":{"df":0,"docs":{},"o":{"df":4,"docs":{"16":{"tf":2.449489742783178},"17":{"tf":1.0},"26":{"tf":1.0},"9":{"tf":1.0}}}}},"u":{"6":{"4":{"df":0,"docs":{},"x":{"2":{"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"8":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"n":{"d":{"df":2,"docs":{"26":{"tf":1.0},"27":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"df":1,"docs":{"3":{"tf":1.0}}}}}}}},"i":{"df":0,"docs":{},"t":{"df":2,"docs":{"20":{"tf":1.0},"8":{"tf":1.0}}}},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":2,"docs":{"10":{"tf":1.0},"8":{"tf":1.0}}}}}},"p":{"c":{"df":0,"docs":{},"k":{"df":0,"docs":{},"l":{"df":0,"docs":{},"p":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"s":{"a":{"df":0,"docs":{},"f":{"df":2,"docs":{"15":{"tf":1.0},"23":{"tf":1.0}}}},"df":0,"docs":{}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"p":{"df":3,"docs":{"21":{"tf":1.4142135623730951},"25":{"tf":1.0},"8":{"tf":1.0}}},"s":{"df":12,"docs":{"10":{"tf":2.23606797749979},"11":{"tf":2.23606797749979},"15":{"tf":1.4142135623730951},"17":{"tf":1.7320508075688772},"20":{"tf":1.4142135623730951},"21":{"tf":2.23606797749979},"22":{"tf":1.4142135623730951},"23":{"tf":1.0},"25":{"tf":1.0},"3":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":2.0}}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":2,"docs":{"23":{"tf":1.0},"26":{"tf":1.0}}}}}},"v":{"7":{"df":1,"docs":{"10":{"tf":1.0}}},"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"g":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"23":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}}},"u":{"df":2,"docs":{"17":{"tf":1.4142135623730951},"26":{"tf":1.0}}}},"r":{"df":0,"docs":{},"i":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"9":{"tf":1.0}}}},"df":0,"docs":{}},"df":1,"docs":{"25":{"tf":1.0}},"o":{"df":0,"docs":{},"u":{"df":4,"docs":{"20":{"tf":1.0},"21":{"tf":1.0},"28":{"tf":1.0},"8":{"tf":1.0}}}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":8,"docs":{"10":{"tf":1.0},"15":{"tf":1.4142135623730951},"16":{"tf":2.6457513110645907},"17":{"tf":2.6457513110645907},"25":{"tf":1.0},"5":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"17":{"tf":1.0}}},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"15":{"tf":1.0}}}}}},"t":{"df":0,"docs":{},"i":{"c":{"df":2,"docs":{"16":{"tf":1.7320508075688772},"1
7":{"tf":2.0}}},"df":0,"docs":{}}}}},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"w":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}}}},"s":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}}}}},"w":{"a":{"df":0,"docs":{},"y":{"df":2,"docs":{"16":{"tf":1.0},"9":{"tf":1.0}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":2,"docs":{"15":{"tf":1.0},"20":{"tf":1.0}}}}},"i":{"d":{"df":0,"docs":{},"e":{"df":1,"docs":{"17":{"tf":1.0}}},"t":{"df":0,"docs":{},"h":{"df":1,"docs":{"16":{"tf":1.7320508075688772}}}}},"df":0,"docs":{},"n":{"d":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":1,"docs":{"11":{"tf":1.0}}}}},"df":0,"docs":{}}},"o":{"df":0,"docs":{},"k":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"21":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"k":{"df":3,"docs":{"11":{"tf":1.0},"17":{"tf":1.0},"4":{"tf":1.0}},"l":{"df":0,"docs":{},"o":{"a":{"d":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}}}}}},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":3,"docs":{"15":{"tf":1.0},"21":{"tf":1.0},"3":{"tf":1.0}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"25":{"tf":1.0}}}}}}},"o":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}}}}}}},"x":{"8":{"6":{"_":{"6":{"4":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"0":{",":{"df":0,"docs":{},"x":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"1":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"1":{",":{"df":0,"docs":{},"x":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"2":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"2":{",":{"df":0,"docs":{},"x":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"0":{"df":1,"docs":{"22":{"tf":1.7320508075688772}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}},"y":{"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"df":0,"docs":{},"f":{"df":1,"docs":{"17":{"tf":1.0}}}}}}}}}}}},"breadcrumbs":{"root":{"0":{",":{"3":{"8":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"7":{"0":{"df":1,"docs":{"22":{"tf":1.0}}},"6":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},".":{".":{"df":0,"docs":{},"x":{".":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"(":{")":{")":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"p":{"_":{"b":{"df":0,"docs":{},"y":{"(":{"4":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}},"df":0,"docs":{}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}},"df":0,"docs":{}}},"0":{"2":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":2,"docs":{"16":{"tf":2.449489742783178},"17":{"tf":2.6457513110645907}}},"1":{",":{"5":{"2":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},".":{"5":{"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}},"0":{"0":{",":{"0":{"0":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":1,"docs":{"21":{"tf":1.0}}},"2":{",":{"5":{"0":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs
":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":1,"docs":{"16":{"tf":2.8284271247461903}}},"2":{",":{"3":{"5":{"6":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},".":{"2":{"8":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"o":{"df":1,"docs":{"22":{"tf":1.0}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"7":{"df":0,"docs":{},"x":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}},"9":{"2":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":1,"docs":{"16":{"tf":1.4142135623730951}}},"3":{".":{"5":{"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}},"df":2,"docs":{"10":{"tf":1.0},"16":{"tf":1.4142135623730951}}},"4":{"df":2,"docs":{"16":{"tf":1.7320508075688772},"17":{"tf":1.4142135623730951}}},"5":{",":{"0":{"6":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"6":{"df":1,"docs":{"16":{"tf":1.0}}},"7":{"df":1,"docs":{"16":{"tf":1.4142135623730951}}},"9":{"3":{",":{"4":{"8":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"4":{",":{"1":{"8":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"_":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"k":{"df":1,"docs":{"15":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}},"a":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"7":{"5":{"df":1,"docs":{"11":{"tf":1.0}}},"df":0,"docs":{}},"b":{"df":0,"docs":{},"s":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"25":{"tf":1.0}}}}}}}},"c":{"c":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"23":{"tf":1.0}}}}},"u":{"df":0,"docs":{},"r":{"df":2,"docs":{"18":{"tf":1.0},"27":{"tf":1.0}}}}},"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"l":{"df":4,"docs":{"17":{"tf":1.0},"25":{"tf":1.7320508075688772},"26":{"tf":1.0},"27":{"tf":1.0}}}},"df":0,"docs":{}}}},"d":{"d":{"df":1,"docs":{"7":{"tf":1.4142135623730951}},"i":{"df":0,"docs":{},"t":{"df":2,"docs":{"16":{"tf":1.4142135623730951},"17":{"tf":1.0}}}},"p":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"v":{"a":{"df":0,"docs":{},"n":{"c":{"df":2,"docs":{"10":{"tf":1.0},"27":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"i":{"c":{"df":1,"docs":{"18":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"9":{"tf":1.0}}}},"df":0,"docs":{}}}},"l":{"df":0,"docs":{},"g":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"h":{"df":0,"docs":{},"m":{"df":2,"docs":{"17":{"tf":1.0},"3":{"tf":1.0}}}}}}}}},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":2,"docs":{"15":{"tf":2.0},"25":{"tf":1.4142135623730951}}}}},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":2,"docs":{"8":{"tf":1.0},"9":{"tf":1.0}}}}},"w":{"a":{"df":0,"docs":{},"y":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{}}},"m":{"d":{"'":{"df":1,"docs":{"25":{"tf":1.0}}},"6":{"4":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}},"df":1,"docs":{"25":{"tf":1.4142135623730951}}},"df":0,"docs":{}},"n":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"y":{"df":0,"docs":{},"s":{"df":0,"docs":{
},"i":{"df":1,"docs":{"24":{"tf":1.4142135623730951}}}},"z":{"df":2,"docs":{"27":{"tf":1.4142135623730951},"28":{"tf":1.7320508075688772}}}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"o":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"p":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":1,"docs":{"8":{"tf":1.0}},"i":{"df":1,"docs":{"3":{"tf":1.0}}}},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"x":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":1,"docs":{"6":{"tf":1.4142135623730951}}}}}}}}},"r":{"c":{"df":0,"docs":{},"h":{"df":2,"docs":{"25":{"tf":1.4142135623730951},"28":{"tf":1.0}},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":5,"docs":{"17":{"tf":1.4142135623730951},"25":{"tf":1.0},"27":{"tf":1.4142135623730951},"28":{"tf":1.4142135623730951},"8":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{}}}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"s":{"df":1,"docs":{"25":{"tf":1.0}}}},"m":{"df":2,"docs":{"10":{"tf":1.0},"25":{"tf":1.0}}},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"k":{"df":1,"docs":{"25":{"tf":1.0}}},"s":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}},"r":{"df":0,"docs":{},"t":{"!":{"(":{"df":0,"docs":{},"x":{".":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":1,"docs":{"15":{"tf":1.7320508075688772}}}}},"o":{"c":{"df":0,"docs":{},"i":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"t":{"&":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"df":0,"docs":{},"p":{"df":0,"docs":{},"t":{"df":1,"docs":{"3":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"i":{"b":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"12":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}}}},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"21":{"tf":1.0}}}},"df":0,"docs":{}}},"o":{"df":0,"docs":{},"i":{"d":{"df":1,"docs":{"3":{"tf":1.0}}},"df":0,"docs":{}}},"x":{"2":{"df":1,"docs":{"10":{"tf":1.0}}},"df":0,"docs":{}}},"w":{"a":{"df":0,"docs":{},"r":{"df":2,"docs":{"15":{"tf":1.0},"3":{"tf":1.0}}}},"df":0,"docs":{}}},"b":{"a":{"c":{"df":0,"docs":{},"k":{"df":1,"docs":{"18":{"tf":1.0}}}},"df":0,"docs":{},"n":{"d":{"df":0,"docs":{},"w":{"df":0,"docs":{},"i":{"d":{"df":0,"docs":{},"t":{"df":0,"docs":{},"h":{"df":1,"docs":{"26":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.0}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":1,"docs":{"21":{"tf":1.0}}}}},"df":2,"docs":{"22":{"tf":1.0},"28":{"tf":1.0}},"h":{"a":{"df":0,"docs":{},"v":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"8":{"tf":1.0}}},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"22":{"tf":1.0}}}}}}}},"df":0,"docs":{}},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":1,"docs":{"10":{"tf":1.0}}}}},"n":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"m":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"k":{"df":2,"docs":{"21":{"tf":1.4142135623730951},"22":{"tf":1.0}}}}},"df":0,"docs":{}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":1,"docs":{"9":{"tf":1.0}
}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"3":{"tf":1.0}}}},"t":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"25":{"tf":1.4142135623730951},"26":{"tf":1.0}}}}}}},"i":{"df":0,"docs":{},"g":{"df":1,"docs":{"15":{"tf":1.0}}},"n":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"21":{"tf":1.0}}}}},"df":0,"docs":{}},"r":{"d":{"'":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}}},"o":{"df":0,"docs":{},"o":{"df":0,"docs":{},"k":{"df":3,"docs":{"18":{"tf":1.0},"20":{"tf":1.0},"3":{"tf":1.0}}}},"t":{"df":0,"docs":{},"h":{"df":1,"docs":{"10":{"tf":1.0}}},"t":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"k":{"df":1,"docs":{"27":{"tf":1.0}}}},"df":0,"docs":{}}}}}}},"u":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"15":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}},"r":{"a":{"df":0,"docs":{},"n":{"c":{"df":0,"docs":{},"h":{"df":3,"docs":{"21":{"tf":1.0},"25":{"tf":1.0},"26":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"u":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"d":{"df":5,"docs":{"11":{"tf":1.4142135623730951},"15":{"tf":1.4142135623730951},"18":{"tf":1.0},"21":{"tf":1.4142135623730951},"8":{"tf":1.0}}},"df":0,"docs":{}}},"l":{"df":0,"docs":{},"k":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"c":{"/":{"c":{"df":1,"docs":{"23":{"tf":1.0}}},"df":0,"docs":{}},"a":{"c":{"df":0,"docs":{},"h":{"df":2,"docs":{"21":{"tf":1.0},"23":{"tf":1.0}},"e":{"df":0,"docs":{},"g":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"23":{"tf":1.0}}},"df":0,"docs":{}}}}}}}},"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":2,"docs":{"21":{"tf":1.7320508075688772},"23":{"tf":2.0}},"g":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"23":{"tf":1.0}}},"df":0,"docs":{}}}}}}},"p":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"g":{"df":0,"docs":{},"o":{".":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":0,"docs":{},"l":{"df":1,"docs":{"18":{"tf":1.0}}}}}}},"df":1,"docs":{"21":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.0}}}},"u":{"df":0,"docs":{},"s":{"df":2,"docs":{"11":{"tf":1.4142135623730951},"27":{"tf":1.0}}}}},"df":3,"docs":{"10":{"tf":2.23606797749979},"11":{"tf":1.4142135623730951},"25":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":7,"docs":{"10":{"tf":1.0},"11":{"tf":1.0},"16":{"tf":1.0},"25":{"tf":1.4142135623730951},"27":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.0}}}}},"df":0,"docs":{}}}},"f":{"df":0,"docs":{},"g":{"(":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"_":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"11":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}},"h":{"a":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"26":{"tf":1.0}}}},"n":{"df":0,"docs":{},"g":{"df":1,"docs":{"18":{"tf":1.0}}}},"p":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"18":{"tf":1.0},"4":{"tf":1.0}}}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"k":{"df":2,"docs":{"11":{"tf":1.0},"15":{"tf":2.23606797749979}}}},"df":0,"docs":{}}},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"s":{"df
":0,"docs":{},"e":{"df":1,"docs":{"28":{"tf":1.0}}}}}},"o":{"d":{"df":0,"docs":{},"e":{"df":12,"docs":{"11":{"tf":2.0},"18":{"tf":1.4142135623730951},"20":{"tf":1.0},"22":{"tf":1.0},"23":{"tf":1.0},"24":{"tf":1.4142135623730951},"25":{"tf":2.0},"27":{"tf":1.4142135623730951},"28":{"tf":1.4142135623730951},"3":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.4142135623730951}}}},"df":0,"docs":{},"m":{"b":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"16":{"tf":1.0}}}}},"df":0,"docs":{},"m":{"a":{"df":1,"docs":{"10":{"tf":1.0}},"n":{"d":{"df":2,"docs":{"20":{"tf":1.0},"21":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}},"p":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"s":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"17":{"tf":1.0}}}}}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":5,"docs":{"10":{"tf":1.0},"11":{"tf":1.7320508075688772},"25":{"tf":1.4142135623730951},"8":{"tf":1.0},"9":{"tf":1.7320508075688772}},"e":{"df":0,"docs":{},"r":{"'":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}}}}},"u":{"df":0,"docs":{},"t":{"df":3,"docs":{"11":{"tf":1.0},"17":{"tf":1.0},"28":{"tf":1.0}}}}}},"n":{"d":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":1,"docs":{"25":{"tf":1.0}}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"15":{"tf":1.4142135623730951}}}}}}},"s":{"df":0,"docs":{},"i":{"d":{"df":2,"docs":{"17":{"tf":1.0},"26":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"18":{"tf":1.0}}}}},"t":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"17":{"tf":1.0}}}}},"df":0,"docs":{}}},"t":{"a":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"4":{"tf":1.0}}}}},"df":0,"docs":{}}},"r":{"df":0,"docs":{},"r":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"p":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"18":{"tf":1.0}}},"df":0,"docs":{}}}}}}}},"u":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"21":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"s":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"p":{"df":0,"docs":{},"u":{"'":{"df":1,"docs":{"26":{"tf":1.0}}},"=":{"<":{"c":{"df":0,"docs":{},"p":{"df":0,"docs":{},"u":{"df":1,"docs":{"11":{"tf":1.0}}}}},"df":0,"docs":{}},"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"x":{"df":1,"docs":{"11":{"tf":1.0}}}}}}}},"df":0,"docs":{}},"df":7,"docs":{"10":{"tf":1.7320508075688772},"11":{"tf":3.0},"21":{"tf":1.4142135623730951},"22":{"tf":1.0},"25":{"tf":2.6457513110645907},"26":{"tf":1.4142135623730951},"27":{"tf":1.4142135623730951}}}},"r":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":1,"docs":{"9":{"tf":1.0}}}}},"df":0,"docs":{},"o":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"11":{"tf":1.0}}}}}},"y":{"c":{"df":0,"docs":{},"l":{"df":1,"docs":{"21":{"tf":1.0}}}},"df":0,"docs":{}}},"d":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"w":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"8":{"tf":1.0}}}}}},"t":{"a":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"e":{"b":{"df":0,"docs":{},"u":{"df":0,"docs":{},"g":{"df":4,"docs":{"15":{"tf":1.7320508075688772},"18":{"tf":1.4142135623730951},"21":{"tf":1.0},"22":{"tf":1.0}},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"f":{"df":0,"docs":{},"o":{"df":1,"docs":{"21":
{"tf":1.0}}}}}}}}},"d":{"df":0,"docs":{},"i":{"c":{"df":1,"docs":{"18":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"f":{"a":{"df":0,"docs":{},"u":{"df":0,"docs":{},"l":{"df":0,"docs":{},"t":{"df":5,"docs":{"15":{"tf":1.0},"18":{"tf":1.0},"21":{"tf":1.0},"22":{"tf":1.4142135623730951},"8":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"8":{"tf":1.0}}}}}},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"d":{"df":3,"docs":{"10":{"tf":1.0},"15":{"tf":1.0},"17":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}},"t":{"a":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":2,"docs":{"25":{"tf":1.0},"27":{"tf":1.0}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"14":{"tf":1.4142135623730951}}}},"df":0,"docs":{},"r":{"df":0,"docs":{},"m":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":2,"docs":{"26":{"tf":1.0},"8":{"tf":1.0}}}}}}}}},"i":{"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}},"i":{"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"l":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"s":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"18":{"tf":1.0}}}},"d":{"df":0,"docs":{},"v":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"27":{"tf":1.0}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.7320508075688772}}}},"df":0,"docs":{}}}}}},"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"v":{"df":1,"docs":{"3":{"tf":1.4142135623730951}}}}},"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"y":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}}},"t":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"b":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"11":{"tf":1.0}}}}},"df":0,"docs":{}}}}}},"o":{"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"3":{"tf":1.0}}}}}}}},"df":1,"docs":{"17":{"tf":1.0}},"n":{"'":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{}}},"u":{"df":0,"docs":{},"e":{"df":1,"docs":{"25":{"tf":1.0}}},"r":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"df":0,"docs":{},"e":{"a":{"c":{"df":0,"docs":{},"h":{"df":1,"docs":{"16":{"tf":1.0}}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"9":{"tf":1.0}}}}},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"21":{"tf":1.0}}}}}}},"df":1,"docs":{"21":{"tf":1.0}},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"16":{"tf":1.4142135623730951}}}}}}}},"m":{"df":0,"docs":{},"p":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"n":{"a":{"b":{"df":0,"docs":{},"l":{"df":5,"docs":{"10":{"tf":2.6457513110645907},"21":{"tf":1.4142135623730951},"22":{"tf":1.0},"8":{"tf":2.0},"9":{"tf":1.0}}}},"df":0,"docs":{}},"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}}}}}}},"d":{"df":2,"docs":{"17":{"tf":1.0},"26":{"tf":1.0}}},"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"g":{"df":0,"docs":{},"h":{"df":1,"docs":{"15":{"tf":1.0}}}}}},"v":{"df":0,"docs":{},"i":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"
df":0,"docs":{},"n":{"df":1,"docs":{"9":{"tf":1.0}}}}}}}},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"10":{"tf":1.0}},"t":{"df":2,"docs":{"20":{"tf":1.0},"21":{"tf":1.0}}}}}},"x":{"a":{"df":0,"docs":{},"m":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":8,"docs":{"10":{"tf":1.7320508075688772},"11":{"tf":1.4142135623730951},"16":{"tf":1.4142135623730951},"17":{"tf":1.0},"25":{"tf":1.4142135623730951},"26":{"tf":1.0},"3":{"tf":1.0},"8":{"tf":1.0}}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":4,"docs":{"18":{"tf":1.0},"21":{"tf":1.4142135623730951},"22":{"tf":1.7320508075688772},"26":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}},"p":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"27":{"tf":1.0}}}}}}},"df":0,"docs":{}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"s":{"df":3,"docs":{"10":{"tf":1.7320508075688772},"25":{"tf":1.0},"9":{"tf":1.4142135623730951}}}}}}},"y":{"df":1,"docs":{"22":{"tf":1.0}}}},"f":{"3":{"2":{"df":1,"docs":{"17":{"tf":2.449489742783178}},"x":{"4":{":":{":":{"df":0,"docs":{},"f":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"e":{"_":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"d":{"(":{"&":{"df":0,"docs":{},"x":{"[":{"df":0,"docs":{},"i":{".":{".":{"]":{")":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{}}}}},"s":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"t":{"(":{"0":{"df":1,"docs":{"17":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}},"a":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"df":0,"docs":{},"l":{"df":0,"docs":{},"s":{"df":1,"docs":{"15":{"tf":1.4142135623730951}}}},"m":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":2,"docs":{"11":{"tf":1.0},"25":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"(":{"df":0,"docs":{},"x":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}}},"df":2,"docs":{"17":{"tf":1.0},"3":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}}}},"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":8,"docs":{"10":{"tf":2.8284271247461903},"11":{"tf":1.7320508075688772},"12":{"tf":1.0},"13":{"tf":1.0},"14":{"tf":1.7320508075688772},"20":{"tf":1.0},"8":{"tf":1.7320508075688772},"9":{"tf":1.0}},"e":{"=":{"+":{"df":0,"docs":{},"v":{"7":{",":{"+":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"10":{"tf":1.0}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}},"<":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":
1,"docs":{"10":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"df":0,"docs":{},"s":{"=":{"+":{"a":{"df":0,"docs":{},"v":{"df":0,"docs":{},"x":{"2":{",":{"+":{"df":0,"docs":{},"f":{"df":0,"docs":{},"m":{"a":{"df":1,"docs":{"10":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"df":1,"docs":{"10":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"3":{",":{"+":{"a":{"df":0,"docs":{},"v":{"df":0,"docs":{},"x":{"df":1,"docs":{"10":{"tf":1.0}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}}}}},"df":0,"docs":{}},"i":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":1,"docs":{"27":{"tf":1.0}}}},"n":{"d":{"df":1,"docs":{"23":{"tf":1.0}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"s":{"df":0,"docs":{},"h":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"l":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"9":{"tf":1.4142135623730951}}}},"df":0,"docs":{},"o":{"a":{"df":0,"docs":{},"t":{"df":4,"docs":{"4":{"tf":1.7320508075688772},"5":{"tf":1.0},"6":{"tf":1.0},"7":{"tf":1.0}}}},"df":0,"docs":{}}},"m":{"a":{"df":1,"docs":{"10":{"tf":1.4142135623730951}}},"df":0,"docs":{}},"n":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}},"o":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":2,"docs":{"17":{"tf":1.0},"25":{"tf":1.4142135623730951}}}}}},"u":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}}},"r":{"a":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"df":0,"docs":{},"w":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"k":{"df":1,"docs":{"20":{"tf":1.0}}}}}}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"e":{"df":1,"docs":{"28":{"tf":1.0}}}}},"u":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":1,"docs":{"20":{"tf":1.0}}}},"n":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":5,"docs":{"15":{"tf":1.0},"17":{"tf":1.7320508075688772},"22":{"tf":1.4142135623730951},"25":{"tf":1.4142135623730951},"6":{"tf":1.4142135623730951}}}}}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"e":{"df":1,"docs":{"7":{"tf":1.4142135623730951}}}}}},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":7,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":2.0},"18":{"tf":1.0},"25":{"tf":1.4142135623730951},"27":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.4142135623730951}}}}},"t":{"df":1,"docs":{"22":{"tf":1.0}}}},"i":{"df":0,"docs":{},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"r":{"a":{"df":0,"docs":{},"p":{"df":0,"docs":{},"h":{"=":{"d":{"df":0,"docs":{},"w":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"f":{"df":1,"docs":{"21":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}},"df":0,"docs":{},"l":{"b":{"df":0,"docs":{},"r":{"df":1,"docs":{"21":{"tf":1.0}}}},"df":0,"docs":{}}},"df":1,"docs":{"23":{"tf":1.0}}}}},"df":0,"docs":{}},"u":{"a":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":1,"docs":{"8":{"tf":1.0}}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}},"h":{"a":{"df":0,"docs":{},"p":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":2,"docs":{"17":{"tf":1.0},"26":{"tf":1.4142135623730951}}}}}},"r":{"d":{"df":0,"docs":{},"w":{"a":{"df":0,"docs":{},"r":{"df":2,"docs":{"20":{"tf":1.0},"21":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"w":{"df":0,"
docs":{},"e":{"df":0,"docs":{},"l":{"df":2,"docs":{"21":{"tf":1.0},"25":{"tf":1.0}}}}}},"v":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}}}}},"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"p":{"df":1,"docs":{"23":{"tf":1.0}}},"r":{"d":{"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"l":{"df":0,"docs":{},"p":{"df":3,"docs":{"21":{"tf":1.0},"23":{"tf":1.0},"3":{"tf":1.0}}}},"r":{"df":0,"docs":{},"e":{"df":2,"docs":{"22":{"tf":1.0},"25":{"tf":1.0}}}}},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"r":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}},"g":{"df":0,"docs":{},"h":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"h":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}}}}}},"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"2":{"tf":1.4142135623730951}}}}}}}},"o":{"df":0,"docs":{},"l":{"d":{"df":1,"docs":{"15":{"tf":1.0}}},"df":0,"docs":{}},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"z":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":2,"docs":{"16":{"tf":2.0},"17":{"tf":2.6457513110645907}}}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"14":{"tf":1.4142135623730951}}}},"t":{"df":0,"docs":{},"s":{"df":0,"docs":{},"p":{"df":0,"docs":{},"o":{"df":0,"docs":{},"t":{"df":1,"docs":{"18":{"tf":1.4142135623730951}}}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}}}}}},"i":{".":{"df":2,"docs":{"21":{"tf":1.0},"27":{"tf":1.4142135623730951}}},"a":{"c":{"a":{"df":1,"docs":{"28":{"tf":1.7320508075688772}}},"df":0,"docs":{}},"df":0,"docs":{}},"d":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":3,"docs":{"11":{"tf":1.0},"18":{"tf":1.0},"22":{"tf":1.0}}}}}}}},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":1,"docs":{"3":{"tf":1.0}}}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":1,"docs":{"15":{"tf":1.0}}}},"m":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"d":{"df":0,"docs":{},"i":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}},"p":{"a":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"18":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"v":{"df":2,"docs":{"18":{"tf":1.0},"25":{"tf":1.0}}}}}}},"n":{"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"d":{"df":2,"docs":{"10":{"tf":1.0},"15":{"tf":1.0}}},"df":0,"docs":{}}}}},"i":{"df":0,"docs":{},"v":{"df":0,"docs":{},"i":{"d":{"df":0,"docs":{},"u":{"df":2,"docs":{"10":{"tf":1.0},"11":{"tf":1.0}}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"i":{"df":1,"docs":{"27":{"tf":1.0}}}},"df":0,"docs":{}}}}},"f":{"df":0,"docs":{},"o":{"df":2,"docs":{"18":{"tf":1.0},"21":{"tf":1.0}},"r":{"df":0,"docs":{},"m":{"df":4,"docs":{"16":{"tf":1.0},"18":{"tf":1.0},"27":{"tf":1.0},"4":{"tf":1.0}}}}}},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":1,"docs":{"23":{"tf":1.0}}}}},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"13":{"tf":1.4142135623730951}}}}},"n":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs
":{"17":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"i":{"d":{"df":1,"docs":{"17":{"tf":1.0}}},"df":0,"docs":{}},"t":{"df":0,"docs":{},"e":{"a":{"d":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"r":{"df":0,"docs":{},"u":{"c":{"df":0,"docs":{},"t":{"df":7,"docs":{"10":{"tf":1.7320508075688772},"22":{"tf":1.7320508075688772},"25":{"tf":2.23606797749979},"26":{"tf":2.0},"27":{"tf":1.7320508075688772},"8":{"tf":1.4142135623730951},"9":{"tf":1.0}}}},"df":0,"docs":{}}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"g":{"df":0,"docs":{},"r":{"df":1,"docs":{"20":{"tf":1.0}}}},"l":{"'":{"df":2,"docs":{"25":{"tf":1.0},"28":{"tf":1.7320508075688772}}},"df":3,"docs":{"21":{"tf":1.0},"22":{"tf":1.7320508075688772},"28":{"tf":1.0}}},"r":{"df":0,"docs":{},"f":{"a":{"c":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"l":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"v":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}}},"n":{"df":3,"docs":{"25":{"tf":1.0},"26":{"tf":1.4142135623730951},"27":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"o":{"d":{"df":0,"docs":{},"u":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"0":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"s":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":1,"docs":{"21":{"tf":1.0}}}},"s":{"df":0,"docs":{},"u":{"df":2,"docs":{"21":{"tf":1.0},"27":{"tf":1.0}}}}}},"j":{"a":{"df":0,"docs":{},"g":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"k":{"8":{"'":{"df":1,"docs":{"25":{"tf":1.0}}},"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"'":{"df":1,"docs":{"20":{"tf":1.0}}},"df":1,"docs":{"28":{"tf":1.0}}}}}},"y":{"df":1,"docs":{"22":{"tf":1.0}}}},"i":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}}},"n":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":1,"docs":{"27":{"tf":1.0}},"l":{"df":0,"docs":{},"e":{"d":{"df":0,"docs":{},"g":{"df":1,"docs":{"27":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}}}}}},"l":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"df":2,"docs":{"16":{"tf":1.0},"17":{"tf":1.7320508075688772}}}},"r":{"df":0,"docs":{},"g":{"df":1,"docs":{"17":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"15":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"21":{"tf":1.0}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"c":{"df":1,"docs":{"27":{"tf":1.0}}},"df":0,"docs":{}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}}}}},"df":0,"docs":{},"i":{"b":{"c":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"5":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}},"df":0,"docs":{},"m":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}}}}},"n":{"df":0,"docs":{},"e":{"df":2,"docs":{"18":{"tf":1.0},"21":{"tf":1.0}}},"k":{"df":1,"docs":{"18":{"tf":1.0}}},"u":{"df":0,"docs":{},"x":{"df":2,"docs":{"19":{"tf":1.4142135623730951},"20":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"t":{"df":4,"docs":{"10":{"tf":1.7320508075688772},"11":{"tf":1.4142135623730951},"21":{"tf":1.4142135623730951},"8":{"tf":1.0}}}}},"l":{"df":0,"docs":{},"v":{"df":0,"docs":{},"m":{"df":1,"docs":{"29":{"tf":1.4142135623730951}}}}},"o":{"a":{"d":{"/":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"df":1,"docs":{"15":{"tf"
:1.0}}},"df":0,"docs":{}},"c":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"11":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{},"n":{"df":0,"docs":{},"g":{"df":1,"docs":{"26":{"tf":1.0}}}},"o":{"df":0,"docs":{},"k":{"df":2,"docs":{"20":{"tf":1.0},"25":{"tf":1.4142135623730951}}},"p":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}}},"m":{"a":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":3,"docs":{"17":{"tf":1.0},"24":{"tf":1.4142135623730951},"27":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"i":{"df":2,"docs":{"27":{"tf":1.4142135623730951},"3":{"tf":1.0}}},"u":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}},"s":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"f":{"df":1,"docs":{"23":{"tf":1.0}}}}}},"t":{"df":0,"docs":{},"h":{"df":4,"docs":{"4":{"tf":1.4142135623730951},"5":{"tf":1.7320508075688772},"6":{"tf":1.0},"7":{"tf":1.0}}}},"x":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"b":{"df":1,"docs":{"21":{"tf":1.0}}},"c":{"a":{"df":1,"docs":{"29":{"tf":1.4142135623730951}}},"df":0,"docs":{}},"df":1,"docs":{"22":{"tf":1.4142135623730951}},"e":{"a":{"df":0,"docs":{},"n":{"df":2,"docs":{"22":{"tf":1.0},"26":{"tf":1.0}},"t":{"df":1,"docs":{"11":{"tf":1.0}}}}},"df":0,"docs":{},"m":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":2,"docs":{"23":{"tf":1.0},"26":{"tf":1.4142135623730951}}}}}},"t":{"df":0,"docs":{},"h":{"df":0,"docs":{},"o":{"d":{"df":1,"docs":{"15":{"tf":1.0}}},"df":0,"docs":{}}}}},"i":{"c":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"a":{"df":0,"docs":{},"r":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":2,"docs":{"25":{"tf":2.0},"27":{"tf":1.0}}}}}},"df":0,"docs":{}}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}}},"df":0,"docs":{},"p":{"df":1,"docs":{"25":{"tf":1.0}}},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"21":{"tf":1.0}}}}},"o":{"d":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}},"l":{"df":1,"docs":{"11":{"tf":1.7320508075688772}}},"r":{"df":0,"docs":{},"n":{"df":2,"docs":{"25":{"tf":1.4142135623730951},"26":{"tf":1.0}}}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"20":{"tf":1.0}}}}}}},"r":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.0}}}},"v":{"a":{"df":0,"docs":{},"p":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.0}}},"h":{"df":0,"docs":{},"l":{"df":0,"docs":{},"p":{"df":1,"docs":{"22":{"tf":1.0}}}}}}},"s":{"df":0,"docs":{},"v":{"c":{"df":1,"docs":{"11":{"tf":1.0}}},"df":0,"docs":{}}},"u":{"c":{"df":0,"docs":{},"h":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{},"l":{"df":0,"docs":{},"p":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"7":{"tf":1.4142135623730951}}}}}}}},"t":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}},"n":{"]":{">":{":":{":":{"df":0,"docs":{},"f":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"e":{"_":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},
"df":0,"docs":{}}}}},"w":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"_":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"e":{"_":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"d":{"(":{"&":{"df":0,"docs":{},"m":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"15":{"tf":1.0}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"v":{"df":1,"docs":{"11":{"tf":1.0}}}}}},"b":{"df":0,"docs":{},"o":{"d":{"df":0,"docs":{},"i":{"df":1,"docs":{"22":{"tf":1.7320508075688772}}},"y":{"_":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"b":{":":{":":{"df":0,"docs":{},"r":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"df":1,"docs":{"22":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"d":{":":{":":{"a":{"d":{"df":0,"docs":{},"v":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}},"df":0,"docs":{}}},"df":0,"docs":{}}},"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":0,"docs":{},"s":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"df":1,"docs":{"15":{"tf":1.0}},"e":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"26":{"tf":1.0}}}},"c":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"10":{"tf":1.0}}}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"e":{"d":{"df":1,"docs":{"10":{"tf":1.0}}},"df":0,"docs":{}},"g":{"a":{"df":0,"docs":{},"t":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"10":{"tf":1.0}}}},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"11":{"tf":1.0}}}}},"w":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"21":{"tf":1.0}}}}},"x":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}},"o":{"d":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}},"df":0,"docs":{},"p":{"df":1,"docs":{"25":{"tf":1.4142135623730951}}},"t":{"df":0,"docs":{},"e":{"df":1,"docs":{"10":{"tf":1.0}}}},"w":{"a":{"d":{"a":{"df":0,"docs":{},"y":{"df":1,"docs":{"23":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{},"h":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"26":{"tf":1.0}}}}}}},"u":{"df":0,"docs":{},"m":{"b":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"16":{"tf":1.0},"4":{"tf":1.0}}}}},"df":0,"docs":{}}}},"o":{"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"28":{"tf":1.0}}}}}},"n":{"df":6,"docs":{"10":{"tf":1.0},"16":{"tf":1.0},"17":{"tf":1.0},"26":{"tf":1.0},"3":{"tf":1.0},"9":{"tf":1.0}}},"p":{"df":1,"docs":{"27":{"tf":1.0}},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}},"r":{"df":3,"docs":{"16":{"tf":2.449489742783178},"17":{"tf":3.1622776601683795},"26":{"tf":1.4142135623730951}}}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":5,"docs":{"11":{"tf":1.7320508075688772},"18":{"tf":1.0},"21":{"tf":1.0},"25":{"tf":1.0},"26":{"tf":1.0}}},"o":{"df":0,"docs":{},"n":{"df":3,"docs":{"15":{"tf":1.4142135623730951},"21":{"tf":1.0},"9":{"tf":1.0}}}}}
}},"r":{"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"18":{"tf":1.4142135623730951},"26":{"tf":1.0}}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"16":{"tf":1.0}}}}}}},"u":{"df":0,"docs":{},"t":{"df":2,"docs":{"26":{"tf":1.0},"27":{"tf":1.0}},"p":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":2,"docs":{"22":{"tf":1.4142135623730951},"27":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"i":{"d":{"df":1,"docs":{"20":{"tf":1.0}}},"df":0,"docs":{}}}}},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"p":{"a":{"c":{"df":0,"docs":{},"k":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"11":{"tf":1.0}}}},"df":1,"docs":{"15":{"tf":1.0}},"e":{"d":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"d":{"df":1,"docs":{"3":{"tf":1.7320508075688772}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}},"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"21":{"tf":1.4142135623730951}},"i":{"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"9":{"tf":1.0}}}}},"c":{"df":1,"docs":{"11":{"tf":1.0}}},"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"k":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{},"r":{"c":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"f":{".":{"d":{"a":{"df":0,"docs":{},"t":{"a":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":3,"docs":{"20":{"tf":2.0},"21":{"tf":2.449489742783178},"22":{"tf":2.23606797749979}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"m":{"df":15,"docs":{"15":{"tf":1.7320508075688772},"16":{"tf":1.0},"17":{"tf":2.0},"18":{"tf":2.0},"19":{"tf":1.7320508075688772},"20":{"tf":2.0},"21":{"tf":1.7320508075688772},"22":{"tf":1.4142135623730951},"23":{"tf":1.4142135623730951},"24":{"tf":1.0},"25":{"tf":2.23606797749979},"26":{"tf":1.4142135623730951},"27":{"tf":1.0},"28":{"tf":1.4142135623730951},"29":{"tf":1.0}}}}}},"t":{"a":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"4":{"tf":1.0}}}}},"df":0,"docs":{}}}},"h":{"df":0,"docs":{},"y":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"c":{"df":1,"docs":{"27":{"tf":1.0}}},"df":0,"docs":{}}}}},"i":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"26":{"tf":1.4142135623730951}}}}}}},"t":{"df":0,"docs":{},"f":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"3":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}}},"l":{"a":{"df":0,"docs":{},"n":{"df":1,"docs":{"11":{"tf":1.0}}},"t":{"df":0,"docs":{},"f":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"m":{"df":1,"docs":{"8":{"tf":1.0}}}}}}}},"df":0,"docs":{}},"o":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":5,"docs":{"27":{"tf":1.0},"4":{"tf":1.7320508075688772},"5":{"tf":1.0},"6":{"tf":1.0},"7":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"t":{"a":{"b":{"df":0,"docs":{},"l":{"df":2,"docs":{"3":{"tf":1.0},"8":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"w":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"20":{"tf":1.0}}}}}},"r":{"a":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"c":{"df":2,"docs":{"18":{"tf":1.0},"3":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"df":0,"docs":{},"e":{"d":{"df":0,"
docs":{},"i":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"df":0,"docs":{}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"22":{"tf":1.0}}}},"t":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":2,"docs":{"21":{"tf":1.0},"26":{"tf":1.0}}}}}},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":2,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":1.4142135623730951}}}}},"o":{"c":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"8":{"tf":1.0}},"o":{"df":0,"docs":{},"r":{"df":4,"docs":{"16":{"tf":1.0},"21":{"tf":1.0},"26":{"tf":1.0},"8":{"tf":1.0}}}}}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":12,"docs":{"18":{"tf":2.23606797749979},"19":{"tf":1.7320508075688772},"20":{"tf":2.0},"21":{"tf":1.0},"22":{"tf":1.4142135623730951},"23":{"tf":2.0},"24":{"tf":1.0},"25":{"tf":1.0},"26":{"tf":2.0},"27":{"tf":1.0},"28":{"tf":1.0},"29":{"tf":1.0}}}}},"g":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"m":{"'":{"df":3,"docs":{"18":{"tf":1.0},"21":{"tf":1.0},"22":{"tf":1.4142135623730951}}},"df":5,"docs":{"11":{"tf":1.4142135623730951},"18":{"tf":1.4142135623730951},"21":{"tf":1.7320508075688772},"22":{"tf":1.0},"8":{"tf":1.0}},"m":{"df":1,"docs":{"23":{"tf":1.0}}}}},"df":0,"docs":{}}},"j":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"23":{"tf":1.0}}}},"df":0,"docs":{}}},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"11":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"28":{"tf":1.0}}}}},"df":0,"docs":{}}}}}},"v":{"df":0,"docs":{},"i":{"d":{"df":5,"docs":{"10":{"tf":1.0},"18":{"tf":1.0},"22":{"tf":1.0},"27":{"tf":1.0},"3":{"tf":1.0}}},"df":0,"docs":{}}}}}},"r":{"df":0,"docs":{},"e":{")":{"df":0,"docs":{},"r":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"a":{"d":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}},"df":2,"docs":{"15":{"tf":1.0},"25":{"tf":1.0}}},"df":0,"docs":{}},"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"d":{"df":2,"docs":{"18":{"tf":1.0},"21":{"tf":3.0}}},"df":0,"docs":{}}}},"d":{"df":1,"docs":{"22":{"tf":1.0}},"u":{"c":{"df":1,"docs":{"16":{"tf":1.0}},"t":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"25":{"tf":1.0},"8":{"tf":1.0}}}}},"g":{"df":0,"docs":{},"i":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"27":{"tf":1.0}}}}}},"l":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"s":{"df":2,"docs":{"15":{"tf":1.0},"21":{"tf":1.7320508075688772}}}},"df":0,"docs":{}}},"p":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}},"l":{"a":{"c":{"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":2,"docs":{"22":{"tf":2.0},"26":{"tf":1.0}}}}},"z":{"df":1,"docs":{"25":{"tf":1.4142135623730951}}}},"q":{"df":0,"docs":{},"u":{"df":0,"docs":{},"i":{"df":0,"docs":{},"r":{"df":3,"docs":{"18":{"tf":1.0},"25":{"tf":1.0},"27":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"18":{"tf":1.0}}},"u":{"df":0,"docs":{},"l":{"df":0,"docs":{},"t":{"'":{"df":1,"docs":{"16":{"tf":1.0}}},"df":2,"docs":{"16":{"tf":1.0},"17":{"tf":1.7320508075688772}}}}}},"t":{"df":1,"docs":{"2
5":{"tf":2.0}},"u":{"df":0,"docs":{},"r":{"df":0,"docs":{},"n":{"df":1,"docs":{"25":{"tf":1.0}}}}}}},"u":{"df":0,"docs":{},"n":{"df":2,"docs":{"11":{"tf":1.4142135623730951},"17":{"tf":1.0}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":2,"docs":{"14":{"tf":1.4142135623730951},"25":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"c":{"df":2,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":1.4142135623730951}}},"df":6,"docs":{"10":{"tf":1.0},"11":{"tf":1.0},"18":{"tf":1.0},"2":{"tf":1.4142135623730951},"8":{"tf":1.0},"9":{"tf":1.0}},"f":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"g":{"df":2,"docs":{"11":{"tf":1.0},"9":{"tf":1.7320508075688772}}}},"df":0,"docs":{}}}}}}},"s":{"a":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"df":2,"docs":{"15":{"tf":1.0},"8":{"tf":1.0}}}},"m":{"df":0,"docs":{},"e":{"df":1,"docs":{"16":{"tf":1.4142135623730951}}},"p":{"df":0,"docs":{},"l":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"c":{"a":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":1,"docs":{"26":{"tf":1.0}}}}}},"df":0,"docs":{}}},"h":{"df":0,"docs":{},"e":{"d":{"df":0,"docs":{},"u":{"df":0,"docs":{},"l":{"df":1,"docs":{"26":{"tf":1.0}}}}},"df":0,"docs":{}}},"o":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":1,"docs":{"20":{"tf":1.0}}}}}},"df":2,"docs":{"15":{"tf":1.4142135623730951},"22":{"tf":1.0}},"e":{"df":0,"docs":{},"e":{"df":2,"docs":{"10":{"tf":1.0},"17":{"tf":1.0}}},"p":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"10":{"tf":1.0}}}},"df":0,"docs":{}},"t":{"df":6,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":1.0},"21":{"tf":1.0},"23":{"tf":1.0},"25":{"tf":1.0},"27":{"tf":1.0}}}},"h":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"5":{"tf":1.4142135623730951}}}}}},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":0,"docs":{},"i":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"c":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}}}},"m":{"d":{"<":{"[":{"df":0,"docs":{},"t":{"df":1,"docs":{"15":{"tf":1.7320508075688772}}}},"df":0,"docs":{}},"df":11,"docs":{"1":{"tf":1.4142135623730951},"10":{"tf":1.0},"16":{"tf":1.0},"18":{"tf":1.0},"2":{"tf":1.4142135623730951},"20":{"tf":1.0},"25":{"tf":1.0},"26":{"tf":1.0},"3":{"tf":1.0},"8":{"tf":1.4142135623730951},"9":{"tf":1.0}}},"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":2,"docs":{"25":{"tf":1.0},"26":{"tf":1.0}},"i":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"n":{"df":0,"docs":{},"g":{"df":0,"docs":{},"l":{"df":1,"docs":{"17":{"tf":1.0}}}}},"t":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"t":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}},"z":{"df":0,"docs":{},"e":{"df":1,"docs":{"15":{"tf":1.0}}}}},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"e":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"df":2,"docs":{"15":{"tf":1.7320508075688772},"17":{"tf":1.4142135623730951}}}},"df":0,"docs":{}},"o":{"df":0,"docs":{},"w":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"(":{"df":0,"docs":{},"x":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}}},"d":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":0,"docs":{},"n":{"df":1,"docs":{"26":{"tf":1.0}}}}}},"df":3,"docs":{"17":{"tf":1.0},"21":{"tf":1.0},"26":{"tf":1.0
}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}}}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"8":{"tf":1.0}}}},"u":{"df":0,"docs":{},"r":{"c":{"df":4,"docs":{"18":{"tf":1.0},"22":{"tf":1.0},"28":{"tf":1.0},"8":{"tf":1.0}}},"df":0,"docs":{}}}},"p":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"i":{"df":0,"docs":{},"f":{"df":3,"docs":{"11":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.0}},"i":{"df":1,"docs":{"10":{"tf":1.0}}}}}},"df":0,"docs":{},"e":{"d":{"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{}},"n":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}}},"o":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}},"q":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"p":{"d":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}}},"s":{"df":0,"docs":{},"e":{"2":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"e":{"3":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}}}},"t":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":1,"docs":{"26":{"tf":1.0}}}},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}},"t":{"df":0,"docs":{},"i":{"c":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"p":{"df":2,"docs":{"21":{"tf":1.0},"22":{"tf":1.0}}}},"i":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":3,"docs":{"11":{"tf":1.0},"15":{"tf":1.0},"25":{"tf":1.0}}}}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"e":{"df":2,"docs":{"15":{"tf":1.0},"26":{"tf":2.0}}}}},"r":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"m":{"df":1,"docs":{"10":{"tf":1.0}}}},"df":0,"docs":{}},"u":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"u":{"b":{"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"a":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"j":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":2,"docs":{"22":{"tf":1.0},"26":{"tf":1.0}}}},"df":0,"docs":{}}},"t":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":1,"docs":{"26":{"tf":1.0}}}}}}}},"c":{"df":0,"docs":{},"h":{"df":4,"docs":{"11":{"tf":1.0},"21":{"tf":1.0},"25":{"tf":1.4142135623730951},"26":{"tf":1.0}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"m":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"df":1,"docs":{"17":{"tf":2.6457513110645907}},"m":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"26":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"s":{"c":{"a":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{}}}},"p":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":7,"docs":{"10":{"tf":2.0},"11":{"tf":1.0},"20":{"tf":1.0},"21":{"tf":1.4142135623730951},"25":{"tf":1.0},"28":{"tf":1.0},"8":{"tf":1.7320508075688772}}}}}}}},"y":{"df":0,"docs":{},"m":{"b":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.0}}}}},"df":0,"docs":{}},"n":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"x":{"df":3,"docs":{"10":{"tf":1.0},"11":{"tf":1.0},"22":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}},
"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"df":1,"docs":{"20":{"tf":1.0}}}}}}}},"t":{"a":{"df":0,"docs":{},"k":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}}}},"r":{"df":0,"docs":{},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"(":{"df":1,"docs":{"21":{"tf":1.0}}},"/":{"df":0,"docs":{},"r":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"/":{"df":0,"docs":{},"m":{"df":0,"docs":{},"i":{"df":1,"docs":{"21":{"tf":1.0}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}}}},"=":{"$":{"df":0,"docs":{},"{":{"df":0,"docs":{},"t":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":2,"docs":{"10":{"tf":1.0},"11":{"tf":1.0}}}}}}}}},"df":0,"docs":{},"i":{"6":{"8":{"6":{"df":1,"docs":{"11":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}},"_":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"12":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{}}}},"df":9,"docs":{"10":{"tf":4.0},"11":{"tf":3.1622776601683795},"12":{"tf":1.0},"13":{"tf":1.0},"14":{"tf":1.0},"25":{"tf":1.4142135623730951},"27":{"tf":1.0},"8":{"tf":2.0},"9":{"tf":1.0}}}}}}},"df":1,"docs":{"15":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"m":{"df":1,"docs":{"25":{"tf":1.0}},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"g":{"df":1,"docs":{"16":{"tf":1.0}}}}}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"17":{"tf":1.0}}}}},"h":{"a":{"df":0,"docs":{},"t":{"'":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"e":{"df":0,"docs":{},"f":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":2,"docs":{"11":{"tf":1.0},"27":{"tf":1.0}}}}}}}},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"g":{"df":1,"docs":{"26":{"tf":1.4142135623730951}}}}},"o":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"df":2,"docs":{"25":{"tf":1.0},"3":{"tf":1.0}}}},"u":{"df":0,"docs":{},"g":{"df":0,"docs":{},"h":{"df":1,"docs":{"10":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"g":{"df":0,"docs":{},"h":{"df":0,"docs":{},"t":{"df":0,"docs":{},"p":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"27":{"tf":1.0}}}}}}}}}}}},"i":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"df":2,"docs":{"21":{"tf":1.0},"22":{"tf":1.7320508075688772}}}},"p":{"df":1,"docs":{"3":{"tf":1.0}}}},"o":{"/":{"df":0,"docs":{},"f":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"df":0,"docs":{},"g":{"df":0,"docs":{},"g":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.0}}}}},"o":{"df":0,"docs":{},"l":{"df":5,"docs":{"23":{"tf":1.0},"24":{"tf":1.4142135623730951},"26":{"tf":1.0},"27":{"tf":1.7320508075688772},"28":{"tf":1.4142135623730951}}}}},"r":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"s":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"t":{"df":1,"docs":{"11":{"tf":1.0}}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}},"i":{"c":{"df":0,"docs":{},"k":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{},"g":{"df":0,"docs":{},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"8":{"tf":1.0}}}}}},"p":{"df":0,"docs":{},"l":{"df":2,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":1.0}}}},"v":{"df":0,"docs":{},"i":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"3":{
"tf":1.0}}}},"df":0,"docs":{}}}}},"w":{"df":0,"docs":{},"o":{"df":4,"docs":{"16":{"tf":2.449489742783178},"17":{"tf":1.0},"26":{"tf":1.0},"9":{"tf":1.0}}}}},"u":{"6":{"4":{"df":0,"docs":{},"x":{"2":{"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"8":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"n":{"d":{"df":2,"docs":{"26":{"tf":1.0},"27":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"df":1,"docs":{"3":{"tf":1.0}}}}}}}},"i":{"df":0,"docs":{},"t":{"df":2,"docs":{"20":{"tf":1.0},"8":{"tf":1.0}}}},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":2,"docs":{"10":{"tf":1.0},"8":{"tf":1.0}}}}}},"p":{"c":{"df":0,"docs":{},"k":{"df":0,"docs":{},"l":{"df":0,"docs":{},"p":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"s":{"a":{"df":0,"docs":{},"f":{"df":2,"docs":{"15":{"tf":1.0},"23":{"tf":1.0}}}},"df":0,"docs":{}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"p":{"df":3,"docs":{"21":{"tf":1.4142135623730951},"25":{"tf":1.0},"8":{"tf":1.0}}},"s":{"df":12,"docs":{"10":{"tf":2.23606797749979},"11":{"tf":2.23606797749979},"15":{"tf":1.4142135623730951},"17":{"tf":1.7320508075688772},"20":{"tf":1.7320508075688772},"21":{"tf":2.23606797749979},"22":{"tf":1.4142135623730951},"23":{"tf":1.4142135623730951},"25":{"tf":1.0},"3":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":2.23606797749979}}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":2,"docs":{"23":{"tf":1.0},"26":{"tf":1.0}}}}}},"v":{"7":{"df":1,"docs":{"10":{"tf":1.0}}},"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"g":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"23":{"tf":1.7320508075688772}}},"df":0,"docs":{}}}}},"u":{"df":2,"docs":{"17":{"tf":1.4142135623730951},"26":{"tf":1.0}}}},"r":{"df":0,"docs":{},"i":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"9":{"tf":1.0}}}},"df":0,"docs":{}},"df":1,"docs":{"25":{"tf":1.0}},"o":{"df":0,"docs":{},"u":{"df":4,"docs":{"20":{"tf":1.0},"21":{"tf":1.0},"28":{"tf":1.0},"8":{"tf":1.0}}}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":8,"docs":{"10":{"tf":1.0},"15":{"tf":1.4142135623730951},"16":{"tf":2.6457513110645907},"17":{"tf":2.6457513110645907},"25":{"tf":1.0},"5":{"tf":1.4142135623730951},"8":{"tf":1.0},"9":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"17":{"tf":1.0}}},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"15":{"tf":1.0}}}}}},"t":{"df":0,"docs":{},"i":{"c":{"df":2,"docs":{"16":{"tf":2.0},"17":{"tf":2.0}}},"df":0,"docs":{}}}}},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"w":{"df":1,"docs":{"22":{"tf":1.7320508075688772}}}},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}}}},"s":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}}}}},"w":{"a":{"df":0,"docs":{},"y":{"df":2,"docs":{"16":{"tf":1.0},"9":{"tf":1.0}}}},"df":0,"docs":{},"e":{"df":0,"docs"
:{},"l":{"df":0,"docs":{},"l":{"df":2,"docs":{"15":{"tf":1.0},"20":{"tf":1.0}}}}},"i":{"d":{"df":0,"docs":{},"e":{"df":1,"docs":{"17":{"tf":1.0}}},"t":{"df":0,"docs":{},"h":{"df":1,"docs":{"16":{"tf":1.7320508075688772}}}}},"df":0,"docs":{},"n":{"d":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":1,"docs":{"11":{"tf":1.0}}}}},"df":0,"docs":{}}},"o":{"df":0,"docs":{},"k":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"21":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"k":{"df":3,"docs":{"11":{"tf":1.0},"17":{"tf":1.0},"4":{"tf":1.0}},"l":{"df":0,"docs":{},"o":{"a":{"d":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}}}}}},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":3,"docs":{"15":{"tf":1.0},"21":{"tf":1.0},"3":{"tf":1.0}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"25":{"tf":1.0}}}}}}},"o":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}}}}}}},"x":{"8":{"6":{"_":{"6":{"4":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"0":{",":{"df":0,"docs":{},"x":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"1":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"1":{",":{"df":0,"docs":{},"x":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"2":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"2":{",":{"df":0,"docs":{},"x":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"0":{"df":1,"docs":{"22":{"tf":1.7320508075688772}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}},"y":{"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"df":0,"docs":{},"f":{"df":1,"docs":{"17":{"tf":1.0}}}}}}}}}}}},"title":{"root":{"a":{"d":{"d":{"df":1,"docs":{"7":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"y":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":1,"docs":{"24":{"tf":1.0}}}},"z":{"df":2,"docs":{"27":{"tf":1.0},"28":{"tf":1.0}}}}}},"df":0,"docs":{}},"p":{"df":0,"docs":{},"p":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"x":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":1,"docs":{"6":{"tf":1.0}}}}}}}}},"r":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"28":{"tf":1.0}}}}}},"df":0,"docs":{}}}}}},"df":0,"docs":{}},"t":{"df":0,"docs":{},"t":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"b":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"12":{"tf":1.0}}}}},"df":0,"docs":{}}}}}},"b":{"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"15":{"tf":1.0}}},"df":0,"docs":{}}}}},"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"k":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{}}},"o":{"d":{"df":0,"docs":{},"e":{"df":3,"docs":{"24":{"tf":1.0},"27":{"tf":1.0},"28":{"tf":1.0}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}},"df":0,"docs":{}}}}},"p":{"df":0,"docs":{},"u":{"df":3,"docs":{"11":{"tf":1.0},"25":{"tf":1.0},"26":{"tf":1.0}}}}},"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"14":{"tf":1.0}}}},"df":0,"docs":{}}}},"i":{"df":0,"
docs":{},"s":{"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"v":{"df":1,"docs":{"3":{"tf":1.0}}}}},"df":0,"docs":{}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"8":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"f":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":3,"docs":{"10":{"tf":1.0},"14":{"tf":1.0},"8":{"tf":1.0}}}}}},"df":0,"docs":{}},"l":{"df":0,"docs":{},"o":{"a":{"df":0,"docs":{},"t":{"df":1,"docs":{"4":{"tf":1.0}}}},"df":0,"docs":{}}},"u":{"df":0,"docs":{},"n":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"6":{"tf":1.0}}}}}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"e":{"df":1,"docs":{"7":{"tf":1.0}}}}}},"h":{"df":0,"docs":{},"i":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"2":{"tf":1.0}}}}}}}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"z":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":2,"docs":{"16":{"tf":1.0},"17":{"tf":1.0}}}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"14":{"tf":1.0}}}}}},"i":{"a":{"c":{"a":{"df":1,"docs":{"28":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{},"n":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"13":{"tf":1.0}}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"'":{"df":1,"docs":{"28":{"tf":1.0}}},"df":0,"docs":{}},"r":{"df":0,"docs":{},"n":{"df":1,"docs":{"26":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"o":{"d":{"df":0,"docs":{},"u":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"0":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}}},"l":{"df":0,"docs":{},"i":{"b":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"5":{"tf":1.0}}}}},"df":0,"docs":{}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"u":{"df":0,"docs":{},"x":{"df":1,"docs":{"19":{"tf":1.0}}}}}},"l":{"df":0,"docs":{},"v":{"df":0,"docs":{},"m":{"df":1,"docs":{"29":{"tf":1.0}}}}}},"m":{"a":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":2,"docs":{"24":{"tf":1.0},"27":{"tf":1.0}}}}}},"df":0,"docs":{},"t":{"df":0,"docs":{},"h":{"df":2,"docs":{"4":{"tf":1.0},"5":{"tf":1.0}}}}},"c":{"a":{"df":1,"docs":{"29":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"a":{"df":0,"docs":{},"r":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"df":0,"docs":{}}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"o":{"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"n":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"df":0,"docs":{}},"u":{"df":0,"docs":{},"l":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"7":{"tf":1.0}}}}}}}}}},"o":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"16":{"tf":1.0},"17":{"tf":1.0}}}}}},"p":{"a":{"c":{"df":0,"docs":{},"k":{"df":0,"docs":{},"e":{"d":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"d":{"df":1,"docs":{"3":{"tf":1.0}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}},"df":0,"docs":{}},"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"f":{"df":1,"docs":{"20":{"tf":1.0}},"o":{"df":0,"docs":{},
"r":{"df":0,"docs":{},"m":{"df":3,"docs":{"17":{"tf":1.0},"18":{"tf":1.0},"19":{"tf":1.0}}}}}}}},"o":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"4":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":2,"docs":{"18":{"tf":1.0},"19":{"tf":1.0}}}}}}}},"r":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"d":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{},"p":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}}}},"u":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":1,"docs":{"14":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"2":{"tf":1.0}},"f":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"9":{"tf":1.0}}}},"df":0,"docs":{}}}}}}},"s":{"df":0,"docs":{},"h":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"5":{"tf":1.0}}}}}},"i":{"df":0,"docs":{},"m":{"d":{"df":2,"docs":{"1":{"tf":1.0},"2":{"tf":1.0}}},"df":0,"docs":{}}},"u":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"26":{"tf":1.0}}}}},"df":0,"docs":{}}}}},"t":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"_":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"12":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"df":3,"docs":{"10":{"tf":1.0},"11":{"tf":1.0},"8":{"tf":1.0}}}}}}},"df":0,"docs":{},"o":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":1,"docs":{"24":{"tf":1.0}}}}}},"u":{"df":0,"docs":{},"s":{"df":3,"docs":{"20":{"tf":1.0},"23":{"tf":1.0},"9":{"tf":1.0}}}},"v":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"g":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"23":{"tf":1.0}}},"df":0,"docs":{}}}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"5":{"tf":1.0}}}}}},"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"c":{"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}}}}},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"w":{"df":1,"docs":{"22":{"tf":1.0}}}}}}}}},"lang":"English","pipeline":["trimmer","stopWordFilter","stemmer"],"ref":"id","version":"0.9.5"},"results_options":{"limit_results":30,"teaser_word_count":30},"search_options":{"bool":"OR","expand":true,"fields":{"body":{"boost":1},"breadcrumbs":{"boost":1},"title":{"boost":2}}}}); \ No newline at end of file diff --git a/perf-guide/searchindex.json b/perf-guide/searchindex.json new file mode 100644 index 000000000..ce6ee84cc --- /dev/null +++ b/perf-guide/searchindex.json @@ -0,0 +1 @@ 
+{"doc_urls":["introduction.html#introduction","introduction.html#what-is-simd","introduction.html#history-of-simd-in-rust","introduction.html#discover-packed_simd","float-math/fp.html#floating-point-math","float-math/svml.html#short-vector-math-library","float-math/approx.html#approximate-functions","float-math/fma.html#fused-multiply-add","target-feature/features.html#enabling-target-features","target-feature/rustflags.html#using-rustflags","target-feature/rustflags.html#target-feature","target-feature/rustflags.html#target-cpu","target-feature/attribute.html#the-target_feature-attribute","target-feature/inlining.html#inlining","target-feature/runtime.html#detecting-host-features-at-runtime","bound_checks.html#bounds-checking","vert-hor-ops.html#vertical-and-horizontal-operations","vert-hor-ops.html#performance-consideration-of-horizontal-operations","prof/profiling.html#performance-profiling","prof/linux.html#performance-profiling-on-linux","prof/linux.html#using-perf","prof/linux.html#recording","prof/linux.html#viewing-the-report","prof/linux.html#using-valgrind","prof/mca.html#machine-code-analysis-tools","prof/mca.html#the-microarchitecture-of-modern-cpus","prof/mca.html#summary-of-cpu-internals","prof/mca.html#analyzing-the-machine-code","prof/mca.html#intels-architecture-code-analyzer-iaca","prof/mca.html#llvm-mca"],"index":{"documentStore":{"docInfo":{"0":{"body":0,"breadcrumbs":1,"title":1},"1":{"body":0,"breadcrumbs":1,"title":1},"10":{"body":102,"breadcrumbs":4,"title":2},"11":{"body":92,"breadcrumbs":4,"title":2},"12":{"body":0,"breadcrumbs":4,"title":2},"13":{"body":0,"breadcrumbs":3,"title":1},"14":{"body":0,"breadcrumbs":6,"title":4},"15":{"body":74,"breadcrumbs":2,"title":2},"16":{"body":77,"breadcrumbs":3,"title":3},"17":{"body":130,"breadcrumbs":4,"title":4},"18":{"body":47,"breadcrumbs":2,"title":2},"19":{"body":0,"breadcrumbs":5,"title":3},"2":{"body":0,"breadcrumbs":3,"title":3},"20":{"body":31,"breadcrumbs":4,"title":2},"21":{"body":113,"breadcrumbs":3,"title":1},"22":{"body":127,"breadcrumbs":4,"title":2},"23":{"body":29,"breadcrumbs":4,"title":2},"24":{"body":0,"breadcrumbs":6,"title":4},"25":{"body":115,"breadcrumbs":5,"title":3},"26":{"body":71,"breadcrumbs":5,"title":3},"27":{"body":47,"breadcrumbs":5,"title":3},"28":{"body":18,"breadcrumbs":7,"title":5},"29":{"body":0,"breadcrumbs":4,"title":2},"3":{"body":29,"breadcrumbs":2,"title":2},"4":{"body":8,"breadcrumbs":3,"title":3},"5":{"body":0,"breadcrumbs":7,"title":4},"6":{"body":0,"breadcrumbs":5,"title":2},"7":{"body":0,"breadcrumbs":6,"title":3},"8":{"body":55,"breadcrumbs":3,"title":3},"9":{"body":35,"breadcrumbs":4,"title":2}},"docs":{"0":{"body":"","breadcrumbs":"Introduction","id":"0","title":"Introduction"},"1":{"body":"","breadcrumbs":"What is SIMD","id":"1","title":"What is SIMD"},"10":{"body":"Syntax: -C target-feature= Provides the compiler with a comma-separated set of instruction extensions to enable. Example : Use -C target-features=+sse3,+avx to enable generating instructions for Streaming SIMD Extensions 3 and Advanced Vector Extensions . To list target triples for all targets supported by Rust, use: rustc --print target-list To list all support target features for a certain target triple, use: rustc --target=${TRIPLE} --print target-features Note that all CPU features are independent, and will have to be enabled individually. Example : Setting -C target-features=+avx2 will not enable fma, even though all CPUs which support AVX2 also support FMA. 
To enable both, one has to use -C target-feature=+avx2,+fma. Some features also depend on other features, which need to be enabled for the target instructions to be generated. Example: Unless v7 is specified as the target CPU (see below), to enable NEON on ARM it is necessary to use -C target-feature=+v7,+neon.","breadcrumbs":"Target features » target-feature","id":"10","title":"target-feature"},"11":{"body":"Syntax: -C target-cpu=<cpu> Sets the identifier of a CPU family / model for which to build and optimize the code. Example: RUSTFLAGS='-C target-cpu=cortex-a75' To list all supported target CPUs for a certain target triple, use: rustc --target=${TRIPLE} --print target-cpus Example: rustc --target=i686-pc-windows-msvc --print target-cpus The compiler will translate this into a list of target features. Therefore, individual feature checks (#[cfg(target_feature = \"...\")]) will still work properly. It will cause the code generator to optimize the generated code for that specific CPU model. Using native as the CPU model will cause Rust to generate and optimize code for the CPU running the compiler. It is useful when building programs which you plan to only use locally. This should never be used when the generated programs are meant to be run on other computers, such as when packaging for distribution or cross-compiling.","breadcrumbs":"Target features » target-cpu","id":"11","title":"target-cpu"},"12":{"body":"","breadcrumbs":"Target features » The target_feature attribute","id":"12","title":"The target_feature attribute"},"13":{"body":"","breadcrumbs":"Target features » Inlining","id":"13","title":"Inlining"},"14":{"body":"","breadcrumbs":"Target features » Detecting host features at runtime","id":"14","title":"Detecting host features at runtime"},"15":{"body":"Reading and writing packed vectors to/from slices is checked by default. Independently of the configuration options used, the safe functions: Simd<[T; N]>::from_slice_aligned(&s[..]) Simd<[T; N]>::write_to_slice_aligned(&mut s[..]) always check that: the slice is big enough to hold the vector, and that the slice is suitably aligned to perform an aligned load/store for a Simd<[T; N]> (this alignment is often much larger than that of T). There are _unaligned versions that use unaligned loads and stores, as well as unsafe _unchecked versions that do not perform any checks iff debug-assertions = false / debug = false. That is, the _unchecked methods do still assert size and alignment in debug builds and could also do so in release builds depending on the configuration options. These assertions often significantly impact performance, and you should be aware of them.","breadcrumbs":"Bounds checking","id":"15","title":"Bounds checking"},"16":{"body":"In SIMD terminology, each vector has a certain \"width\" (number of lanes).
A vector processor is able to perform two kinds of operations on a vector: Vertical operations: operate on two vectors of the same width, result has same width Example: vertical addition of two f32x4 vectors %0 == | 2 | -3.5 | 0 | 7 | + + + + %1 == | 4 | 1.5 | -1 | 0 | = = = =\n%0 + %1 == | 6 | -2 | -1 | 7 | Horizontal operations: reduce the elements of two vectors in some way, the result's elements combine information from the two original ones Example: horizontal addition of two u64x2 vectors %0 == | 1 | 3 | └─+───┘ └───────┐ │ %1 == | 4 | -1 | │ └─+──┘ │ └───┐ │ │ │ ┌─────│───┘ ▼ ▼\n%0 + %1 == | 4 | 3 |","breadcrumbs":"Vertical and horizontal operations","id":"16","title":"Vertical and horizontal operations"},"17":{"body":"The result of vertical operations, like vector negation: -a, for a given lane, does not depend on the result of the operation for the other lanes. The result of horizontal operations, like the vector sum reduction: a.sum(), depends on the value of all vector lanes. In virtually all architectures, vertical operations are fast, while horizontal operations are, by comparison, very slow. Consider the following two functions for computing the sum of all f32 values in a slice: fn fast_sum(x: &[f32]) -> f32 { assert!(x.len() % 4 == 0); let mut sum = f32x4::splat(0.); // [0., 0., 0., 0.] for i in (0..x.len()).step_by(4) { sum += f32x4::from_slice_unaligned(&x[i..]); } sum.sum()\n} fn slow_sum(x: &[f32]) -> f32 { assert!(x.len() % 4 == 0); let mut sum: f32 = 0.; for i in (0..x.len()).step_by(4) { sum += f32x4::from_slice_unaligned(&x[i..]).sum(); } sum\n} The inner loop over the slice is where the bulk of the work actually happens. There, the fast_sum function performs vertical operations into a vector, doing a single horizontal reduction at the end, while the slow_sum function performs horizontal vector operations inside of the loop. On all widely-used architectures, fast_sum is a large constant factor faster than slow_sum. You can run the slice_sum example and see for yourself. On the particular machine tested there, the algorithm using the horizontal vector addition is 2.7x slower than the one using vertical vector operations!","breadcrumbs":"Performance consideration of horizontal operations","id":"17","title":"Performance consideration of horizontal operations"},"18":{"body":"While the rest of the book provides practical advice on how to improve the performance of SIMD code, this chapter is dedicated to performance profiling. Profiling consists of recording a program's execution in order to identify program hotspots. Important: most profilers require debug information in order to accurately link the program hotspots back to the corresponding source code lines. Rust will disable debug info generation by default for optimized builds, but you can change that in your Cargo.toml.","breadcrumbs":"Performance profiling","id":"18","title":"Performance profiling"},"19":{"body":"","breadcrumbs":"Performance profiling » Performance profiling on Linux","id":"19","title":"Performance profiling on Linux"},"2":{"body":"","breadcrumbs":"History of SIMD in Rust","id":"2","title":"History of SIMD in Rust"},"20":{"body":"perf is the most powerful performance profiler for Linux, featuring support for various hardware Performance Monitoring Units, as well as integration with the kernel's performance events framework. We will only look at how the perf command can be used to profile SIMD code.
Full system profiling is outside of the scope of this book.","breadcrumbs":"Performance profiling » Using perf","id":"20","title":"Using perf"},"21":{"body":"The first step is to record a program's execution during an average workload. It helps if you can isolate the parts of your program which have performance issues, and set up a benchmark which can be easily (re)run. Build the benchmark binary in release mode, after having enabled debug info: $ cargo build --release\nFinished release [optimized + debuginfo] target(s) in 0.02s Then use the perf record subcommand: $ perf record --call-graph=dwarf ./target/release/my-program\n[ perf record: Woken up 10 times to write data ]\n[ perf record: Captured and wrote 2,356 MB perf.data (292 samples) ] Instead of using --call-graph=dwarf, which can become pretty slow, you can use --call-graph=lbr if you have a processor with support for Last Branch Record (i.e. Intel Haswell and newer). perf will, by default, record the count of CPU cycles it takes to execute various parts of your program. You can use the -e command line option to enable other performance events, such as cache-misses. Use perf list to get a list of all hardware counters supported by your CPU.","breadcrumbs":"Performance profiling » Recording","id":"21","title":"Recording"},"22":{"body":"The next step is getting a bird's eye view of the program's execution. perf provides an ncurses-based interface which will get you started. Use perf report to open a visualization of your program's performance: perf report --hierarchy -M intel --hierarchy will display a tree-like structure of where your program spent most of its time. -M intel enables disassembly output with Intel syntax, which is subjectively more readable than the default AT&T syntax. Here is the output from profiling the nbody benchmark: - 100,00% nbody - 94,18% nbody + 93,48% [.] nbody_lib::simd::advance + 0,70% [.] nbody_lib::run + 5,06% libc-2.28.so If you move with the arrow keys to any node in the tree, you can then press a to have perf annotate that node. This means it will: disassemble the function; associate every instruction with the percentage of time which was spent executing it; and interleave the disassembly with the source code, assuming it found the debug symbols (you can use s to toggle this behaviour). perf will, by default, open the instruction which it identified as being the hottest spot in the function: 0,76 │ movapd xmm2,xmm0\n0,38 │ movhlps xmm2,xmm0 │ addpd xmm2,xmm0 │ unpcklpd xmm1,xmm2\n12,50 │ sqrtpd xmm0,xmm1\n1,52 │ mulpd xmm0,xmm1 In this case, sqrtpd will be highlighted in red, since that's the instruction which the CPU spends most of its time executing.","breadcrumbs":"Performance profiling » Viewing the report","id":"22","title":"Viewing the report"},"23":{"body":"Valgrind is a set of tools which initially helped C/C++ programmers find unsafe memory accesses in their code.
Nowadays the project also has a heap profiler called massif, a cache utilization profiler called cachegrind, and a call-graph performance profiler called callgrind.","breadcrumbs":"Performance profiling » Using Valgrind","id":"23","title":"Using Valgrind"},"24":{"body":"","breadcrumbs":"Performance profiling » Machine code analysis tools","id":"24","title":"Machine code analysis tools"},"25":{"body":"While you might have heard of Instruction Set Architectures, such as x86 or arm or mips, the term microarchitecture (also written here as µ-arch) refers to the internal details of an actual family of CPUs, such as Intel's Haswell or AMD's Jaguar. Replacing scalar code with SIMD code will improve performance on all CPUs supporting the required vector extensions. However, due to microarchitectural differences, the actual speed-up at runtime might vary. Example: a simple example arises when optimizing for AMD K8 CPUs. The assembly generated for an empty function should look like this: nop\nret The nop is used to align the ret instruction for better performance. However, the compiler will actually generate the following code: repz ret The repz instruction will repeat the following instruction until a certain condition is met. Of course, in this situation, the function will simply immediately return, and the ret instruction is still aligned. However, AMD K8's branch predictor performs better with the latter code. For those looking to absolutely maximize performance for a certain target µ-arch, you will have to read some CPU manuals, or ask the compiler to do it for you with -C target-cpu.","breadcrumbs":"Performance profiling » The microarchitecture of modern CPUs","id":"25","title":"The microarchitecture of modern CPUs"},"26":{"body":"Modern processors are able to execute instructions out-of-order for better performance, by utilizing tricks such as branch prediction, instruction pipelining, or superscalar execution. SIMD instructions are also subject to these optimizations, meaning it can get pretty difficult to determine where the slowdown happens. For example, if the profiler reports a store operation is slow, one of two things could be happening: the store is limited by the CPU's memory bandwidth, which is actually an ideal scenario, all things considered; memory bandwidth is nowhere near its peak, but the value to be stored is at the end of a long chain of operations, and this store is where the profiler encountered the pipeline stall; Since most profilers are simple tools which don't understand the subtleties of instruction scheduling, you","breadcrumbs":"Performance profiling » Summary of CPU internals","id":"26","title":"Summary of CPU internals"},"27":{"body":"Certain tools have knowledge of internal CPU microarchitecture, i.e. they know: how many physical register files a CPU actually has, what the latency / throughput of an instruction is, what µ-ops are generated for a set of instructions, and many other architectural details. These tools are therefore able to provide accurate information as to why some instructions are inefficient, and where the bottleneck is. The disadvantage is that the output of these tools requires advanced knowledge of the target architecture to understand, i.e. they cannot point out what the cause of the issue is explicitly.","breadcrumbs":"Performance profiling » Analyzing the machine code","id":"27","title":"Analyzing the machine code"},"28":{"body":"IACA is a free tool offered by Intel for analyzing the performance of various computational kernels.
Being a proprietary, closed source tool, it only supports Intel's µ-arches.","breadcrumbs":"Performance profiling » Intel's Architecture Code Analyzer (IACA)","id":"28","title":"Intel's Architecture Code Analyzer (IACA)"},"29":{"body":"","breadcrumbs":"Performance profiling » llvm-mca","id":"29","title":"llvm-mca"},"3":{"body":"Writing fast and portable SIMD algorithms using packed_simd is, unfortunately, not trivial. There are many pitfalls that one should be aware of, and some idioms that help avoid those pitfalls. This book attempts to document these best practices and provides practical examples on how to apply the tips to your code.","breadcrumbs":"Discover packed_simd","id":"3","title":"Discover packed_simd"},"4":{"body":"This chapter contains information pertaining to working with floating-point numbers.","breadcrumbs":"Floating-point math","id":"4","title":"Floating-point math"},"5":{"body":"","breadcrumbs":"Floating-point Math » Short Vector Math Library","id":"5","title":"Short Vector Math Library"},"6":{"body":"","breadcrumbs":"Floating-point Math » Approximate functions","id":"6","title":"Approximate functions"},"7":{"body":"","breadcrumbs":"Floating-point Math » Fused Multiply Add","id":"7","title":"Fused Multiply Add"},"8":{"body":"Not all processors of a certain architecture will have SIMD processing units, and using a SIMD instruction which is not supported will trigger undefined behavior. To allow building safe, portable programs, the Rust compiler will not, by default, generate any sort of vector instructions, unless it can statically determine they are supported. For example, on AMD64, SSE2 support is architecturally guaranteed. The x86_64-apple-darwin target enables up to SSSE3. To get a definitive list of which features are enabled by default on various platforms, refer to the target specifications in the compiler's source code.","breadcrumbs":"Enabling target features","id":"8","title":"Enabling target features"},"9":{"body":"One of the easiest ways to benefit from SIMD is to allow the compiler to generate code using certain vector instruction extensions. The environment variable RUSTFLAGS can be used to pass options for code generation to the Rust compiler. These flags will affect all compiled crates.
There are two flags which can be used to enable specific vector extensions:","breadcrumbs":"Target features » Using RUSTFLAGS","id":"9","title":"Using RUSTFLAGS"}},"length":30,"save":true},"fields":["title","body","breadcrumbs"],"index":{"body":{"root":{"0":{",":{"3":{"8":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"7":{"0":{"df":1,"docs":{"22":{"tf":1.0}}},"6":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},".":{".":{"df":0,"docs":{},"x":{".":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"(":{")":{")":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"p":{"_":{"b":{"df":0,"docs":{},"y":{"(":{"4":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}},"df":0,"docs":{}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}},"df":0,"docs":{}}},"0":{"2":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":2,"docs":{"16":{"tf":2.449489742783178},"17":{"tf":2.6457513110645907}}},"1":{",":{"5":{"2":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},".":{"5":{"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}},"0":{"0":{",":{"0":{"0":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":1,"docs":{"21":{"tf":1.0}}},"2":{",":{"5":{"0":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":1,"docs":{"16":{"tf":2.8284271247461903}}},"2":{",":{"3":{"5":{"6":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},".":{"2":{"8":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"o":{"df":1,"docs":{"22":{"tf":1.0}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"7":{"df":0,"docs":{},"x":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}},"9":{"2":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":1,"docs":{"16":{"tf":1.4142135623730951}}},"3":{".":{"5":{"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}},"df":2,"docs":{"10":{"tf":1.0},"16":{"tf":1.4142135623730951}}},"4":{"df":2,"docs":{"16":{"tf":1.7320508075688772},"17":{"tf":1.4142135623730951}}},"5":{",":{"0":{"6":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"6":{"df":1,"docs":{"16":{"tf":1.0}}},"7":{"df":1,"docs":{"16":{"tf":1.4142135623730951}}},"9":{"3":{",":{"4":{"8":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"4":{",":{"1":{"8":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"_":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"k":{"df":1,"docs":{"15":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}},"a":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"7":{"5":{"df":1,"docs":{"11":{"tf":1.0}}},"df":0,"docs":{}},"b":{"df":0,"docs":{},"s":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"25":{"tf":1.0}}}}}}}},"c":{"c":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"23":{"tf":1.0}}}}},"u":{"df":0,"docs":{},"r":{"df":2,"docs":{"18":{"tf":1.0},"27":{"tf":1.0}}}}},"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"l":{"df":4,"docs":{"17":
{"tf":1.0},"25":{"tf":1.7320508075688772},"26":{"tf":1.0},"27":{"tf":1.0}}}},"df":0,"docs":{}}}},"d":{"d":{"df":1,"docs":{"7":{"tf":1.0}},"i":{"df":0,"docs":{},"t":{"df":2,"docs":{"16":{"tf":1.4142135623730951},"17":{"tf":1.0}}}},"p":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"v":{"a":{"df":0,"docs":{},"n":{"c":{"df":2,"docs":{"10":{"tf":1.0},"27":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"i":{"c":{"df":1,"docs":{"18":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"9":{"tf":1.0}}}},"df":0,"docs":{}}}},"l":{"df":0,"docs":{},"g":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"h":{"df":0,"docs":{},"m":{"df":2,"docs":{"17":{"tf":1.0},"3":{"tf":1.0}}}}}}}}},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":2,"docs":{"15":{"tf":2.0},"25":{"tf":1.4142135623730951}}}}},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":2,"docs":{"8":{"tf":1.0},"9":{"tf":1.0}}}}},"w":{"a":{"df":0,"docs":{},"y":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{}}},"m":{"d":{"'":{"df":1,"docs":{"25":{"tf":1.0}}},"6":{"4":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}},"df":1,"docs":{"25":{"tf":1.4142135623730951}}},"df":0,"docs":{}},"n":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"y":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":1,"docs":{"24":{"tf":1.0}}}},"z":{"df":2,"docs":{"27":{"tf":1.0},"28":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"o":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"p":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":1,"docs":{"8":{"tf":1.0}},"i":{"df":1,"docs":{"3":{"tf":1.0}}}},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"x":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":1,"docs":{"6":{"tf":1.0}}}}}}}}},"r":{"c":{"df":0,"docs":{},"h":{"df":2,"docs":{"25":{"tf":1.4142135623730951},"28":{"tf":1.0}},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":5,"docs":{"17":{"tf":1.4142135623730951},"25":{"tf":1.0},"27":{"tf":1.4142135623730951},"28":{"tf":1.0},"8":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{}}}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"s":{"df":1,"docs":{"25":{"tf":1.0}}}},"m":{"df":2,"docs":{"10":{"tf":1.0},"25":{"tf":1.0}}},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"k":{"df":1,"docs":{"25":{"tf":1.0}}},"s":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}},"r":{"df":0,"docs":{},"t":{"!":{"(":{"df":0,"docs":{},"x":{".":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":1,"docs":{"15":{"tf":1.7320508075688772}}}}},"o":{"c":{"df":0,"docs":{},"i":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"t":{"&":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"df":0,"docs":{},"p":{"df":0,"docs":{},"t":{"df":1,"docs":{"3":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"i":{"b":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"12":{"tf":1.0}}}}},"df":0,"docs":{}}}}},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"21":{"tf":1.0}}}},"df":0,"docs":{}}},"o
":{"df":0,"docs":{},"i":{"d":{"df":1,"docs":{"3":{"tf":1.0}}},"df":0,"docs":{}}},"x":{"2":{"df":1,"docs":{"10":{"tf":1.0}}},"df":0,"docs":{}}},"w":{"a":{"df":0,"docs":{},"r":{"df":2,"docs":{"15":{"tf":1.0},"3":{"tf":1.0}}}},"df":0,"docs":{}}},"b":{"a":{"c":{"df":0,"docs":{},"k":{"df":1,"docs":{"18":{"tf":1.0}}}},"df":0,"docs":{},"n":{"d":{"df":0,"docs":{},"w":{"df":0,"docs":{},"i":{"d":{"df":0,"docs":{},"t":{"df":0,"docs":{},"h":{"df":1,"docs":{"26":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.0}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":1,"docs":{"21":{"tf":1.0}}}}},"df":2,"docs":{"22":{"tf":1.0},"28":{"tf":1.0}},"h":{"a":{"df":0,"docs":{},"v":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"8":{"tf":1.0}}},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"22":{"tf":1.0}}}}}}}},"df":0,"docs":{}},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":1,"docs":{"10":{"tf":1.0}}}}},"n":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"m":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"k":{"df":2,"docs":{"21":{"tf":1.4142135623730951},"22":{"tf":1.0}}}}},"df":0,"docs":{}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":1,"docs":{"9":{"tf":1.0}}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"3":{"tf":1.0}}}},"t":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"25":{"tf":1.4142135623730951},"26":{"tf":1.0}}}}}}},"i":{"df":0,"docs":{},"g":{"df":1,"docs":{"15":{"tf":1.0}}},"n":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"21":{"tf":1.0}}}}},"df":0,"docs":{}},"r":{"d":{"'":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}}},"o":{"df":0,"docs":{},"o":{"df":0,"docs":{},"k":{"df":3,"docs":{"18":{"tf":1.0},"20":{"tf":1.0},"3":{"tf":1.0}}}},"t":{"df":0,"docs":{},"h":{"df":1,"docs":{"10":{"tf":1.0}}},"t":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"k":{"df":1,"docs":{"27":{"tf":1.0}}}},"df":0,"docs":{}}}}}}},"u":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"15":{"tf":1.0}}},"df":0,"docs":{}}}},"r":{"a":{"df":0,"docs":{},"n":{"c":{"df":0,"docs":{},"h":{"df":3,"docs":{"21":{"tf":1.0},"25":{"tf":1.0},"26":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"u":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"d":{"df":5,"docs":{"11":{"tf":1.4142135623730951},"15":{"tf":1.4142135623730951},"18":{"tf":1.0},"21":{"tf":1.4142135623730951},"8":{"tf":1.0}}},"df":0,"docs":{}}},"l":{"df":0,"docs":{},"k":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"c":{"/":{"c":{"df":1,"docs":{"23":{"tf":1.0}}},"df":0,"docs":{}},"a":{"c":{"df":0,"docs":{},"h":{"df":2,"docs":{"21":{"tf":1.0},"23":{"tf":1.0}},"e":{"df":0,"docs":{},"g":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"23":{"tf":1.0}}},"df":0,"docs":{}}}}}}}},"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":2,"docs":{"21":{"tf":1.7320508075688772},"23":{"tf":2.0}},"g":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"23":{"tf":1.0}}},"df":0,"docs":{}}}}}}},"p":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"g":{"df":0,"docs":{},"o":{".":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":0,"docs":{},"l":{"df":1,"docs":{"18":{"tf":1.0}}}}}}},"df":1,"docs":{"21":{"tf":1.0}}}}},"s":{"df":0,"docs":{
},"e":{"df":1,"docs":{"22":{"tf":1.0}}}},"u":{"df":0,"docs":{},"s":{"df":2,"docs":{"11":{"tf":1.4142135623730951},"27":{"tf":1.0}}}}},"df":3,"docs":{"10":{"tf":2.23606797749979},"11":{"tf":1.4142135623730951},"25":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":7,"docs":{"10":{"tf":1.0},"11":{"tf":1.0},"16":{"tf":1.0},"25":{"tf":1.4142135623730951},"27":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.0}}}}},"df":0,"docs":{}}}},"f":{"df":0,"docs":{},"g":{"(":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"_":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"11":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}},"h":{"a":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"26":{"tf":1.0}}}},"n":{"df":0,"docs":{},"g":{"df":1,"docs":{"18":{"tf":1.0}}}},"p":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"18":{"tf":1.0},"4":{"tf":1.0}}}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"k":{"df":2,"docs":{"11":{"tf":1.0},"15":{"tf":2.0}}}},"df":0,"docs":{}}},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"df":1,"docs":{"28":{"tf":1.0}}}}}},"o":{"d":{"df":0,"docs":{},"e":{"df":12,"docs":{"11":{"tf":2.0},"18":{"tf":1.4142135623730951},"20":{"tf":1.0},"22":{"tf":1.0},"23":{"tf":1.0},"24":{"tf":1.0},"25":{"tf":2.0},"27":{"tf":1.0},"28":{"tf":1.0},"3":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.4142135623730951}}}},"df":0,"docs":{},"m":{"b":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"16":{"tf":1.0}}}}},"df":0,"docs":{},"m":{"a":{"df":1,"docs":{"10":{"tf":1.0}},"n":{"d":{"df":2,"docs":{"20":{"tf":1.0},"21":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}},"p":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"s":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"17":{"tf":1.0}}}}}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":5,"docs":{"10":{"tf":1.0},"11":{"tf":1.7320508075688772},"25":{"tf":1.4142135623730951},"8":{"tf":1.0},"9":{"tf":1.7320508075688772}},"e":{"df":0,"docs":{},"r":{"'":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}}}}},"u":{"df":0,"docs":{},"t":{"df":3,"docs":{"11":{"tf":1.0},"17":{"tf":1.0},"28":{"tf":1.0}}}}}},"n":{"d":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":1,"docs":{"25":{"tf":1.0}}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"15":{"tf":1.4142135623730951}}}}}}},"s":{"df":0,"docs":{},"i":{"d":{"df":2,"docs":{"17":{"tf":1.0},"26":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"18":{"tf":1.0}}}}},"t":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"17":{"tf":1.0}}}}},"df":0,"docs":{}}},"t":{"a":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"4":{"tf":1.0}}}}},"df":0,"docs":{}}},"r":{"df":0,"docs":{},"r":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"p":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"18":{"tf":1.0}}},"df":0,"docs":{}}}}}}}},"u":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"21":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"s":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"p":{"df":0,"docs":{},"u":{"'":{"df":1,"do
cs":{"26":{"tf":1.0}}},"=":{"<":{"c":{"df":0,"docs":{},"p":{"df":0,"docs":{},"u":{"df":1,"docs":{"11":{"tf":1.0}}}}},"df":0,"docs":{}},"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"x":{"df":1,"docs":{"11":{"tf":1.0}}}}}}}},"df":0,"docs":{}},"df":7,"docs":{"10":{"tf":1.7320508075688772},"11":{"tf":2.8284271247461903},"21":{"tf":1.4142135623730951},"22":{"tf":1.0},"25":{"tf":2.449489742783178},"26":{"tf":1.0},"27":{"tf":1.4142135623730951}}}},"r":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":1,"docs":{"9":{"tf":1.0}}}}},"df":0,"docs":{},"o":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"11":{"tf":1.0}}}}}},"y":{"c":{"df":0,"docs":{},"l":{"df":1,"docs":{"21":{"tf":1.0}}}},"df":0,"docs":{}}},"d":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"w":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"8":{"tf":1.0}}}}}},"t":{"a":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"e":{"b":{"df":0,"docs":{},"u":{"df":0,"docs":{},"g":{"df":4,"docs":{"15":{"tf":1.7320508075688772},"18":{"tf":1.4142135623730951},"21":{"tf":1.0},"22":{"tf":1.0}},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"f":{"df":0,"docs":{},"o":{"df":1,"docs":{"21":{"tf":1.0}}}}}}}}},"d":{"df":0,"docs":{},"i":{"c":{"df":1,"docs":{"18":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"f":{"a":{"df":0,"docs":{},"u":{"df":0,"docs":{},"l":{"df":0,"docs":{},"t":{"df":5,"docs":{"15":{"tf":1.0},"18":{"tf":1.0},"21":{"tf":1.0},"22":{"tf":1.4142135623730951},"8":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"8":{"tf":1.0}}}}}},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"d":{"df":3,"docs":{"10":{"tf":1.0},"15":{"tf":1.0},"17":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}},"t":{"a":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":2,"docs":{"25":{"tf":1.0},"27":{"tf":1.0}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"14":{"tf":1.0}}}},"df":0,"docs":{},"r":{"df":0,"docs":{},"m":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":2,"docs":{"26":{"tf":1.0},"8":{"tf":1.0}}}}}}}}},"i":{"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}},"i":{"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"l":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"s":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"18":{"tf":1.0}}}},"d":{"df":0,"docs":{},"v":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"27":{"tf":1.0}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.7320508075688772}}}},"df":0,"docs":{}}}}}},"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"v":{"df":1,"docs":{"3":{"tf":1.0}}}}},"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"y":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}}},"t":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"b":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"11":{"tf":1.0}}}}},"df":0,"docs":{}}}}}},"o":{"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"3":{"tf":1.0}}}}}}}},"df":1,"docs":{"17":{"tf":1.0}},"n":{"'":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{}}},"u":{"df":0,"docs":{},"e":{"df":1,"docs":{"25":{"tf":1.0}}},"r":{
"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"df":0,"docs":{},"e":{"a":{"c":{"df":0,"docs":{},"h":{"df":1,"docs":{"16":{"tf":1.0}}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"9":{"tf":1.0}}}}},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"21":{"tf":1.0}}}}}}},"df":1,"docs":{"21":{"tf":1.0}},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"16":{"tf":1.4142135623730951}}}}}}}},"m":{"df":0,"docs":{},"p":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"n":{"a":{"b":{"df":0,"docs":{},"l":{"df":5,"docs":{"10":{"tf":2.6457513110645907},"21":{"tf":1.4142135623730951},"22":{"tf":1.0},"8":{"tf":1.7320508075688772},"9":{"tf":1.0}}}},"df":0,"docs":{}},"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}}}}}}},"d":{"df":2,"docs":{"17":{"tf":1.0},"26":{"tf":1.0}}},"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"g":{"df":0,"docs":{},"h":{"df":1,"docs":{"15":{"tf":1.0}}}}}},"v":{"df":0,"docs":{},"i":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"9":{"tf":1.0}}}}}}}},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"10":{"tf":1.0}},"t":{"df":2,"docs":{"20":{"tf":1.0},"21":{"tf":1.0}}}}}},"x":{"a":{"df":0,"docs":{},"m":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":8,"docs":{"10":{"tf":1.7320508075688772},"11":{"tf":1.4142135623730951},"16":{"tf":1.4142135623730951},"17":{"tf":1.0},"25":{"tf":1.4142135623730951},"26":{"tf":1.0},"3":{"tf":1.0},"8":{"tf":1.0}}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":4,"docs":{"18":{"tf":1.0},"21":{"tf":1.4142135623730951},"22":{"tf":1.7320508075688772},"26":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}},"p":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"27":{"tf":1.0}}}}}}},"df":0,"docs":{}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"s":{"df":3,"docs":{"10":{"tf":1.7320508075688772},"25":{"tf":1.0},"9":{"tf":1.4142135623730951}}}}}}},"y":{"df":1,"docs":{"22":{"tf":1.0}}}},"f":{"3":{"2":{"df":1,"docs":{"17":{"tf":2.449489742783178}},"x":{"4":{":":{":":{"df":0,"docs":{},"f":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"e":{"_":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"d":{"(":{"&":{"df":0,"docs":{},"x":{"[":{"df":0,"docs":{},"i":{".":{".":{"]":{")":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{}}}}},"s":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"t":{"(":{"0":{"df":1,"docs":{"17":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}},"a":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"do
cs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"df":0,"docs":{},"l":{"df":0,"docs":{},"s":{"df":1,"docs":{"15":{"tf":1.4142135623730951}}}},"m":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":2,"docs":{"11":{"tf":1.0},"25":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"(":{"df":0,"docs":{},"x":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}}},"df":2,"docs":{"17":{"tf":1.0},"3":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}}}},"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":5,"docs":{"10":{"tf":2.449489742783178},"11":{"tf":1.4142135623730951},"14":{"tf":1.0},"20":{"tf":1.0},"8":{"tf":1.4142135623730951}},"e":{"=":{"+":{"df":0,"docs":{},"v":{"7":{",":{"+":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"10":{"tf":1.0}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}},"<":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"10":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"df":0,"docs":{},"s":{"=":{"+":{"a":{"df":0,"docs":{},"v":{"df":0,"docs":{},"x":{"2":{",":{"+":{"df":0,"docs":{},"f":{"df":0,"docs":{},"m":{"a":{"df":1,"docs":{"10":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"df":1,"docs":{"10":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"3":{",":{"+":{"a":{"df":0,"docs":{},"v":{"df":0,"docs":{},"x":{"df":1,"docs":{"10":{"tf":1.0}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}}}}},"df":0,"docs":{}},"i":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":1,"docs":{"27":{"tf":1.0}}}},"n":{"d":{"df":1,"docs":{"23":{"tf":1.0}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"s":{"df":0,"docs":{},"h":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"l":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"9":{"tf":1.4142135623730951}}}},"df":0,"docs":{},"o":{"a":{"df":0,"docs":{},"t":{"df":1,"docs":{"4":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}},"m":{"a":{"df":1,"docs":{"10":{"tf":1.4142135623730951}}},"df":0,"docs":{}},"n":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}},"o":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":2,"docs":{"17":{"tf":1.0},"25":{"tf":1.4142135623730951}}}}}},"u":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}}},"r":{"a":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"df":0,"docs":{},"w":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"k":{"df":1,"docs":{"20":{"tf":1.0}}}}}}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"e":{"df":1,"docs":{"28":{"tf":1.0}}}}},"u":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":1,"docs":{"20":{"tf":1.0}}}},"n":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":5,"docs":{"15":{"tf":1.0},"17":{"tf":1.7320508075688772},"22":{"tf":1.4142135623730951},"25":{"tf":1.4142135623730951},"6":{"tf":1.0}}}}}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"e":{"df":1,"docs":{"7":{"tf":1.0}}}}}},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":7,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":2.0},"18":{"tf":1.0},"25":{"tf":1.41421356237
30951},"27":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.4142135623730951}}}}},"t":{"df":1,"docs":{"22":{"tf":1.0}}}},"i":{"df":0,"docs":{},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"r":{"a":{"df":0,"docs":{},"p":{"df":0,"docs":{},"h":{"=":{"d":{"df":0,"docs":{},"w":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"f":{"df":1,"docs":{"21":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}},"df":0,"docs":{},"l":{"b":{"df":0,"docs":{},"r":{"df":1,"docs":{"21":{"tf":1.0}}}},"df":0,"docs":{}}},"df":1,"docs":{"23":{"tf":1.0}}}}},"df":0,"docs":{}},"u":{"a":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":1,"docs":{"8":{"tf":1.0}}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}},"h":{"a":{"df":0,"docs":{},"p":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":2,"docs":{"17":{"tf":1.0},"26":{"tf":1.4142135623730951}}}}}},"r":{"d":{"df":0,"docs":{},"w":{"a":{"df":0,"docs":{},"r":{"df":2,"docs":{"20":{"tf":1.0},"21":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"w":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"df":2,"docs":{"21":{"tf":1.0},"25":{"tf":1.0}}}}}},"v":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}}}}},"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"p":{"df":1,"docs":{"23":{"tf":1.0}}},"r":{"d":{"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"l":{"df":0,"docs":{},"p":{"df":3,"docs":{"21":{"tf":1.0},"23":{"tf":1.0},"3":{"tf":1.0}}}},"r":{"df":0,"docs":{},"e":{"df":2,"docs":{"22":{"tf":1.0},"25":{"tf":1.0}}}}},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"r":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}},"g":{"df":0,"docs":{},"h":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"h":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}}}}}},"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"2":{"tf":1.0}}}}}}}},"o":{"df":0,"docs":{},"l":{"d":{"df":1,"docs":{"15":{"tf":1.0}}},"df":0,"docs":{}},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"z":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":2,"docs":{"16":{"tf":1.7320508075688772},"17":{"tf":2.449489742783178}}}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"14":{"tf":1.0}}}},"t":{"df":0,"docs":{},"s":{"df":0,"docs":{},"p":{"df":0,"docs":{},"o":{"df":0,"docs":{},"t":{"df":1,"docs":{"18":{"tf":1.4142135623730951}}}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}}}}}},"i":{".":{"df":2,"docs":{"21":{"tf":1.0},"27":{"tf":1.4142135623730951}}},"a":{"c":{"a":{"df":1,"docs":{"28":{"tf":1.4142135623730951}}},"df":0,"docs":{}},"df":0,"docs":{}},"d":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":3,"docs":{"11":{"tf":1.0},"18":{"tf":1.0},"22":{"tf":1.0}}}}}}}},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":1,"docs":{"3":{"tf":1.0}}}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":1,"docs":{"15":{"tf":1.0}}}},"m":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"d":{"df":0,"docs":{},"i":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}},"p":{"a":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{},"o":{"df":0,"docs":{},
"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"18":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"v":{"df":2,"docs":{"18":{"tf":1.0},"25":{"tf":1.0}}}}}}},"n":{"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"d":{"df":2,"docs":{"10":{"tf":1.0},"15":{"tf":1.0}}},"df":0,"docs":{}}}}},"i":{"df":0,"docs":{},"v":{"df":0,"docs":{},"i":{"d":{"df":0,"docs":{},"u":{"df":2,"docs":{"10":{"tf":1.0},"11":{"tf":1.0}}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"i":{"df":1,"docs":{"27":{"tf":1.0}}}},"df":0,"docs":{}}}}},"f":{"df":0,"docs":{},"o":{"df":2,"docs":{"18":{"tf":1.0},"21":{"tf":1.0}},"r":{"df":0,"docs":{},"m":{"df":4,"docs":{"16":{"tf":1.0},"18":{"tf":1.0},"27":{"tf":1.0},"4":{"tf":1.0}}}}}},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":1,"docs":{"23":{"tf":1.0}}}}},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"13":{"tf":1.0}}}}},"n":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"i":{"d":{"df":1,"docs":{"17":{"tf":1.0}}},"df":0,"docs":{}},"t":{"df":0,"docs":{},"e":{"a":{"d":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"r":{"df":0,"docs":{},"u":{"c":{"df":0,"docs":{},"t":{"df":7,"docs":{"10":{"tf":1.7320508075688772},"22":{"tf":1.7320508075688772},"25":{"tf":2.23606797749979},"26":{"tf":2.0},"27":{"tf":1.7320508075688772},"8":{"tf":1.4142135623730951},"9":{"tf":1.0}}}},"df":0,"docs":{}}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"g":{"df":0,"docs":{},"r":{"df":1,"docs":{"20":{"tf":1.0}}}},"l":{"'":{"df":2,"docs":{"25":{"tf":1.0},"28":{"tf":1.4142135623730951}}},"df":3,"docs":{"21":{"tf":1.0},"22":{"tf":1.7320508075688772},"28":{"tf":1.0}}},"r":{"df":0,"docs":{},"f":{"a":{"c":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"l":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"v":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}}},"n":{"df":3,"docs":{"25":{"tf":1.0},"26":{"tf":1.0},"27":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"o":{"d":{"df":0,"docs":{},"u":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"0":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"s":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":1,"docs":{"21":{"tf":1.0}}}},"s":{"df":0,"docs":{},"u":{"df":2,"docs":{"21":{"tf":1.0},"27":{"tf":1.0}}}}}},"j":{"a":{"df":0,"docs":{},"g":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"k":{"8":{"'":{"df":1,"docs":{"25":{"tf":1.0}}},"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"'":{"df":1,"docs":{"20":{"tf":1.0}}},"df":1,"docs":{"28":{"tf":1.0}}}}}},"y":{"df":1,"docs":{"22":{"tf":1.0}}}},"i":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}}},"n":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":1,"docs":{"27":{"tf":1.0}},"l":{"df":0,"docs":{},"e":{"d":{"df":0,"docs":{},"g":{"df":1,"docs":{"27":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}}}}}},"l":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"df":2,"docs":{"16":{"tf":1.0},"17":{"tf":1.7320508075688772}}}},"r":{"df":0,"docs":{},"g":{"df":1,"docs":{"17":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"15":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"21":{"tf":1.0}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"c":{"df":1,"docs":{"27":{"tf":1.0}}},"d
f":0,"docs":{}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}}}}},"df":0,"docs":{},"i":{"b":{"c":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"5":{"tf":1.0}}}}},"df":0,"docs":{}}},"df":0,"docs":{},"m":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}}}}},"n":{"df":0,"docs":{},"e":{"df":2,"docs":{"18":{"tf":1.0},"21":{"tf":1.0}}},"k":{"df":1,"docs":{"18":{"tf":1.0}}},"u":{"df":0,"docs":{},"x":{"df":2,"docs":{"19":{"tf":1.0},"20":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"t":{"df":4,"docs":{"10":{"tf":1.7320508075688772},"11":{"tf":1.4142135623730951},"21":{"tf":1.4142135623730951},"8":{"tf":1.0}}}}},"l":{"df":0,"docs":{},"v":{"df":0,"docs":{},"m":{"df":1,"docs":{"29":{"tf":1.0}}}}},"o":{"a":{"d":{"/":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"df":1,"docs":{"15":{"tf":1.0}}},"df":0,"docs":{}},"c":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"11":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{},"n":{"df":0,"docs":{},"g":{"df":1,"docs":{"26":{"tf":1.0}}}},"o":{"df":0,"docs":{},"k":{"df":2,"docs":{"20":{"tf":1.0},"25":{"tf":1.4142135623730951}}},"p":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}}},"m":{"a":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":3,"docs":{"17":{"tf":1.0},"24":{"tf":1.0},"27":{"tf":1.0}}}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"i":{"df":2,"docs":{"27":{"tf":1.4142135623730951},"3":{"tf":1.0}}},"u":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}},"s":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"f":{"df":1,"docs":{"23":{"tf":1.0}}}}}},"t":{"df":0,"docs":{},"h":{"df":2,"docs":{"4":{"tf":1.0},"5":{"tf":1.0}}}},"x":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"b":{"df":1,"docs":{"21":{"tf":1.0}}},"c":{"a":{"df":1,"docs":{"29":{"tf":1.0}}},"df":0,"docs":{}},"df":1,"docs":{"22":{"tf":1.4142135623730951}},"e":{"a":{"df":0,"docs":{},"n":{"df":2,"docs":{"22":{"tf":1.0},"26":{"tf":1.0}},"t":{"df":1,"docs":{"11":{"tf":1.0}}}}},"df":0,"docs":{},"m":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":2,"docs":{"23":{"tf":1.0},"26":{"tf":1.4142135623730951}}}}}},"t":{"df":0,"docs":{},"h":{"df":0,"docs":{},"o":{"d":{"df":1,"docs":{"15":{"tf":1.0}}},"df":0,"docs":{}}}}},"i":{"c":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"a":{"df":0,"docs":{},"r":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":2,"docs":{"25":{"tf":1.7320508075688772},"27":{"tf":1.0}}}}}},"df":0,"docs":{}}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}}},"df":0,"docs":{},"p":{"df":1,"docs":{"25":{"tf":1.0}}},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"21":{"tf":1.0}}}}},"o":{"d":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}},"l":{"df":1,"docs":{"11":{"tf":1.7320508075688772}}},"r":{"df":0,"docs":{},"n":{"df":2,"docs":{"25":{"tf":1.0},"26":{"tf":1.0}}}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"20":{"tf":1.0}}}}}}},"r":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.0}}}},"v":{"a":{"df":0,"docs":{},"p":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.0}}},"h":{"df":0,"docs":{},"l":{"df":0,"docs":{},"p":{"df
":1,"docs":{"22":{"tf":1.0}}}}}}},"s":{"df":0,"docs":{},"v":{"c":{"df":1,"docs":{"11":{"tf":1.0}}},"df":0,"docs":{}}},"u":{"c":{"df":0,"docs":{},"h":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{},"l":{"df":0,"docs":{},"p":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"7":{"tf":1.0}}}}}}}},"t":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}},"n":{"]":{">":{":":{":":{"df":0,"docs":{},"f":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"e":{"_":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{}}}}},"w":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"_":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"e":{"_":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"d":{"(":{"&":{"df":0,"docs":{},"m":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"15":{"tf":1.0}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"v":{"df":1,"docs":{"11":{"tf":1.0}}}}}},"b":{"df":0,"docs":{},"o":{"d":{"df":0,"docs":{},"i":{"df":1,"docs":{"22":{"tf":1.7320508075688772}}},"y":{"_":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"b":{":":{":":{"df":0,"docs":{},"r":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"df":1,"docs":{"22":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"d":{":":{":":{"a":{"d":{"df":0,"docs":{},"v":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}},"df":0,"docs":{}}},"df":0,"docs":{}}},"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":0,"docs":{},"s":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"df":1,"docs":{"15":{"tf":1.0}},"e":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"26":{"tf":1.0}}}},"c":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"10":{"tf":1.0}}}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"e":{"d":{"df":1,"docs":{"10":{"tf":1.0}}},"df":0,"docs":{}},"g":{"a":{"df":0,"docs":{},"t":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"10":{"tf":1.0}}}},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"11":{"tf":1.0}}}}},"w":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"21":{"tf":1.0}}}}},"x":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}},"o":{"d":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}},"df":0,"docs":{},"p":{"df":1,"docs":{"25":{"tf":1.4142135623730951}}},"t":{"df":0,"docs":{},"e":{"df":1,"docs":{"10":{"tf":1.0}}}},"w":{"a":{"d":{"a":{"df":0,"docs":{},"y":{"df":1,"docs":{"23":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{},"h":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"26":{"tf":1.0}}}}}}},"u":{"df":0,"
docs":{},"m":{"b":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"16":{"tf":1.0},"4":{"tf":1.0}}}}},"df":0,"docs":{}}}},"o":{"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"28":{"tf":1.0}}}}}},"n":{"df":6,"docs":{"10":{"tf":1.0},"16":{"tf":1.0},"17":{"tf":1.0},"26":{"tf":1.0},"3":{"tf":1.0},"9":{"tf":1.0}}},"p":{"df":1,"docs":{"27":{"tf":1.0}},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}},"r":{"df":3,"docs":{"16":{"tf":2.23606797749979},"17":{"tf":3.0},"26":{"tf":1.4142135623730951}}}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":5,"docs":{"11":{"tf":1.7320508075688772},"18":{"tf":1.0},"21":{"tf":1.0},"25":{"tf":1.0},"26":{"tf":1.0}}},"o":{"df":0,"docs":{},"n":{"df":3,"docs":{"15":{"tf":1.4142135623730951},"21":{"tf":1.0},"9":{"tf":1.0}}}}}}},"r":{"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"18":{"tf":1.4142135623730951},"26":{"tf":1.0}}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"16":{"tf":1.0}}}}}}},"u":{"df":0,"docs":{},"t":{"df":2,"docs":{"26":{"tf":1.0},"27":{"tf":1.0}},"p":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":2,"docs":{"22":{"tf":1.4142135623730951},"27":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"i":{"d":{"df":1,"docs":{"20":{"tf":1.0}}},"df":0,"docs":{}}}}},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"p":{"a":{"c":{"df":0,"docs":{},"k":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"11":{"tf":1.0}}}},"df":1,"docs":{"15":{"tf":1.0}},"e":{"d":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"d":{"df":1,"docs":{"3":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}},"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"21":{"tf":1.4142135623730951}},"i":{"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"9":{"tf":1.0}}}}},"c":{"df":1,"docs":{"11":{"tf":1.0}}},"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"k":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{},"r":{"c":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"f":{".":{"d":{"a":{"df":0,"docs":{},"t":{"a":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":3,"docs":{"20":{"tf":1.7320508075688772},"21":{"tf":2.449489742783178},"22":{"tf":2.23606797749979}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"m":{"df":12,"docs":{"15":{"tf":1.7320508075688772},"16":{"tf":1.0},"17":{"tf":1.7320508075688772},"18":{"tf":1.7320508075688772},"19":{"tf":1.0},"20":{"tf":1.7320508075688772},"21":{"tf":1.4142135623730951},"22":{"tf":1.0},"23":{"tf":1.0},"25":{"tf":2.0},"26":{"tf":1.0},"28":{"tf":1.0}}}}}},"t":{"a":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"4":{"tf":1.0}}}}},"df":0,"docs":{}}}},"h":{"df":0,"docs":{},"y":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"c":{"df":1,"docs":{"27":{"tf":1.0}}},"df":0,"docs":{}}}}},"i":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"26":{"tf":1.4142135623730951}}}}}}},"t":{"df":0,"docs":{},"f":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"3":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}}},"l":{"a":{"df":0,"docs":{},"n":{"df":1,"do
cs":{"11":{"tf":1.0}}},"t":{"df":0,"docs":{},"f":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"m":{"df":1,"docs":{"8":{"tf":1.0}}}}}}}},"df":0,"docs":{}},"o":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":2,"docs":{"27":{"tf":1.0},"4":{"tf":1.4142135623730951}}}}},"r":{"df":0,"docs":{},"t":{"a":{"b":{"df":0,"docs":{},"l":{"df":2,"docs":{"3":{"tf":1.0},"8":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"w":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"20":{"tf":1.0}}}}}},"r":{"a":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"c":{"df":2,"docs":{"18":{"tf":1.0},"3":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"df":0,"docs":{},"e":{"d":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"df":0,"docs":{}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"22":{"tf":1.0}}}},"t":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":2,"docs":{"21":{"tf":1.0},"26":{"tf":1.0}}}}}},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":2,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":1.4142135623730951}}}}},"o":{"c":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"8":{"tf":1.0}},"o":{"df":0,"docs":{},"r":{"df":4,"docs":{"16":{"tf":1.0},"21":{"tf":1.0},"26":{"tf":1.0},"8":{"tf":1.0}}}}}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":6,"docs":{"18":{"tf":2.0},"19":{"tf":1.0},"20":{"tf":1.7320508075688772},"22":{"tf":1.0},"23":{"tf":1.7320508075688772},"26":{"tf":1.7320508075688772}}}}},"g":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"m":{"'":{"df":3,"docs":{"18":{"tf":1.0},"21":{"tf":1.0},"22":{"tf":1.4142135623730951}}},"df":5,"docs":{"11":{"tf":1.4142135623730951},"18":{"tf":1.4142135623730951},"21":{"tf":1.7320508075688772},"22":{"tf":1.0},"8":{"tf":1.0}},"m":{"df":1,"docs":{"23":{"tf":1.0}}}}},"df":0,"docs":{}}},"j":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"23":{"tf":1.0}}}},"df":0,"docs":{}}},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"11":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"28":{"tf":1.0}}}}},"df":0,"docs":{}}}}}},"v":{"df":0,"docs":{},"i":{"d":{"df":5,"docs":{"10":{"tf":1.0},"18":{"tf":1.0},"22":{"tf":1.0},"27":{"tf":1.0},"3":{"tf":1.0}}},"df":0,"docs":{}}}}}},"r":{"df":0,"docs":{},"e":{")":{"df":0,"docs":{},"r":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"a":{"d":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}},"df":2,"docs":{"15":{"tf":1.0},"25":{"tf":1.0}}},"df":0,"docs":{}},"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"d":{"df":2,"docs":{"18":{"tf":1.0},"21":{"tf":2.8284271247461903}}},"df":0,"docs":{}}}},"d":{"df":1,"docs":{"22":{"tf":1.0}},"u":{"c":{"df":1,"docs":{"16":{"tf":1.0}},"t":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"25":{"tf":1.0},"8":{"tf":1.0}}}}},"g":{"df":0,"docs":{},"i":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"27":{"tf":1.0}}}}}},"l":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"s":{"df":2,"docs":{"15":{"tf":1.0},"21":{"tf":1.7320508075688772}}}},"df":0,"docs":{}}},"p":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":1,"d
ocs":{"25":{"tf":1.0}}}},"df":0,"docs":{}},"l":{"a":{"c":{"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":2,"docs":{"22":{"tf":1.7320508075688772},"26":{"tf":1.0}}}}},"z":{"df":1,"docs":{"25":{"tf":1.4142135623730951}}}},"q":{"df":0,"docs":{},"u":{"df":0,"docs":{},"i":{"df":0,"docs":{},"r":{"df":3,"docs":{"18":{"tf":1.0},"25":{"tf":1.0},"27":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"18":{"tf":1.0}}},"u":{"df":0,"docs":{},"l":{"df":0,"docs":{},"t":{"'":{"df":1,"docs":{"16":{"tf":1.0}}},"df":2,"docs":{"16":{"tf":1.0},"17":{"tf":1.7320508075688772}}}}}},"t":{"df":1,"docs":{"25":{"tf":2.0}},"u":{"df":0,"docs":{},"r":{"df":0,"docs":{},"n":{"df":1,"docs":{"25":{"tf":1.0}}}}}}},"u":{"df":0,"docs":{},"n":{"df":2,"docs":{"11":{"tf":1.4142135623730951},"17":{"tf":1.0}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":2,"docs":{"14":{"tf":1.0},"25":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"c":{"df":2,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":1.4142135623730951}}},"df":6,"docs":{"10":{"tf":1.0},"11":{"tf":1.0},"18":{"tf":1.0},"2":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.0}},"f":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"g":{"df":2,"docs":{"11":{"tf":1.0},"9":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}}}}}},"s":{"a":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"df":2,"docs":{"15":{"tf":1.0},"8":{"tf":1.0}}}},"m":{"df":0,"docs":{},"e":{"df":1,"docs":{"16":{"tf":1.4142135623730951}}},"p":{"df":0,"docs":{},"l":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"c":{"a":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":1,"docs":{"26":{"tf":1.0}}}}}},"df":0,"docs":{}}},"h":{"df":0,"docs":{},"e":{"d":{"df":0,"docs":{},"u":{"df":0,"docs":{},"l":{"df":1,"docs":{"26":{"tf":1.0}}}}},"df":0,"docs":{}}},"o":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":1,"docs":{"20":{"tf":1.0}}}}}},"df":2,"docs":{"15":{"tf":1.4142135623730951},"22":{"tf":1.0}},"e":{"df":0,"docs":{},"e":{"df":2,"docs":{"10":{"tf":1.0},"17":{"tf":1.0}}},"p":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"10":{"tf":1.0}}}},"df":0,"docs":{}},"t":{"df":6,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":1.0},"21":{"tf":1.0},"23":{"tf":1.0},"25":{"tf":1.0},"27":{"tf":1.0}}}},"h":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"5":{"tf":1.0}}}}}},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":0,"docs":{},"i":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"c":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}}}},"m":{"d":{"<":{"[":{"df":0,"docs":{},"t":{"df":1,"docs":{"15":{"tf":1.7320508075688772}}}},"df":0,"docs":{}},"df":11,"docs":{"1":{"tf":1.0},"10":{"tf":1.0},"16":{"tf":1.0},"18":{"tf":1.0},"2":{"tf":1.0},"20":{"tf":1.0},"25":{"tf":1.0},"26":{"tf":1.0},"3":{"tf":1.0},"8":{"tf":1.4142135623730951},"9":{"tf":1.0}}},"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":2,"docs":{"25":{"tf":1.0},"26":{"tf":1.0}},"i":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"n":{"df":0,"docs":{},"g":{"df":0,"docs":{},"l":{"df":1,"docs":{"17":{"tf":1.0}}}}},"t":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"t":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}},"z":{"df":0,"docs":{},"e":{"df":1,"docs":{"15":{"tf":1.0}}}}},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"d
ocs":{},"e":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"df":2,"docs":{"15":{"tf":1.7320508075688772},"17":{"tf":1.4142135623730951}}}},"df":0,"docs":{}},"o":{"df":0,"docs":{},"w":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"(":{"df":0,"docs":{},"x":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}}},"d":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":0,"docs":{},"n":{"df":1,"docs":{"26":{"tf":1.0}}}}}},"df":3,"docs":{"17":{"tf":1.0},"21":{"tf":1.0},"26":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}}}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"8":{"tf":1.0}}}},"u":{"df":0,"docs":{},"r":{"c":{"df":4,"docs":{"18":{"tf":1.0},"22":{"tf":1.0},"28":{"tf":1.0},"8":{"tf":1.0}}},"df":0,"docs":{}}}},"p":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"i":{"df":0,"docs":{},"f":{"df":3,"docs":{"11":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.0}},"i":{"df":1,"docs":{"10":{"tf":1.0}}}}}},"df":0,"docs":{},"e":{"d":{"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{}},"n":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}}},"o":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}},"q":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"p":{"d":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}}},"s":{"df":0,"docs":{},"e":{"2":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"e":{"3":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}}}},"t":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":1,"docs":{"26":{"tf":1.0}}}},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}},"t":{"df":0,"docs":{},"i":{"c":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"p":{"df":2,"docs":{"21":{"tf":1.0},"22":{"tf":1.0}}}},"i":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":3,"docs":{"11":{"tf":1.0},"15":{"tf":1.0},"25":{"tf":1.0}}}}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"e":{"df":2,"docs":{"15":{"tf":1.0},"26":{"tf":2.0}}}}},"r":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"m":{"df":1,"docs":{"10":{"tf":1.0}}}},"df":0,"docs":{}},"u":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"u":{"b":{"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"a":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"j":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":2,"docs":{"22":{"tf":1.0},"26":{"tf":1.0}}}},"df":0,"docs":{}}},"t":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":1,"docs":{"26":{"tf":1.0}}}}}}}},"c":{"df":0,"docs":{},"h":{"df":4,"docs":{"11":{"tf":1.0},"21":{"tf":1.0},"25":{"tf":1.4142135623730951},"26":{"tf":1.0}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"m":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"df":1,"docs":{"17":{"tf":2.6457513110645907}},"m":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"26":{"tf":1.0}}}}},"df":0,"docs":{}}},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"s":{"c":{"a":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"26":{"tf":1
.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{}}}},"p":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":7,"docs":{"10":{"tf":2.0},"11":{"tf":1.0},"20":{"tf":1.0},"21":{"tf":1.4142135623730951},"25":{"tf":1.0},"28":{"tf":1.0},"8":{"tf":1.7320508075688772}}}}}}}},"y":{"df":0,"docs":{},"m":{"b":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.0}}}}},"df":0,"docs":{}},"n":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"x":{"df":3,"docs":{"10":{"tf":1.0},"11":{"tf":1.0},"22":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}},"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"df":1,"docs":{"20":{"tf":1.0}}}}}}}},"t":{"a":{"df":0,"docs":{},"k":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}}}},"r":{"df":0,"docs":{},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"(":{"df":1,"docs":{"21":{"tf":1.0}}},"/":{"df":0,"docs":{},"r":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"/":{"df":0,"docs":{},"m":{"df":0,"docs":{},"i":{"df":1,"docs":{"21":{"tf":1.0}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}}}},"=":{"$":{"df":0,"docs":{},"{":{"df":0,"docs":{},"t":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":2,"docs":{"10":{"tf":1.0},"11":{"tf":1.0}}}}}}}}},"df":0,"docs":{},"i":{"6":{"8":{"6":{"df":1,"docs":{"11":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}},"_":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"12":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"df":5,"docs":{"10":{"tf":3.7416573867739413},"11":{"tf":2.8284271247461903},"25":{"tf":1.4142135623730951},"27":{"tf":1.0},"8":{"tf":1.7320508075688772}}}}}}},"df":1,"docs":{"15":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"m":{"df":1,"docs":{"25":{"tf":1.0}},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"g":{"df":1,"docs":{"16":{"tf":1.0}}}}}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"17":{"tf":1.0}}}}},"h":{"a":{"df":0,"docs":{},"t":{"'":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"e":{"df":0,"docs":{},"f":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":2,"docs":{"11":{"tf":1.0},"27":{"tf":1.0}}}}}}}},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"g":{"df":1,"docs":{"26":{"tf":1.4142135623730951}}}}},"o":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"df":2,"docs":{"25":{"tf":1.0},"3":{"tf":1.0}}}},"u":{"df":0,"docs":{},"g":{"df":0,"docs":{},"h":{"df":1,"docs":{"10":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"g":{"df":0,"docs":{},"h":{"df":0,"docs":{},"t":{"df":0,"docs":{},"p":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"27":{"tf":1.0}}}}}}}}}}}},"i":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"df":2,"docs":{"21":{"tf":1.0},"22":{"tf":1.7320508075688772}}}},"p":{"df":1,"docs":{"3":{"tf":1.0}}}},"o":{"/":{"df":0,"docs":{},"f":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"df":0,"docs":{},"g":{"df":0,"docs":{},"g":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.0}}}}},"o":{"df":0,"docs":{},"l":{"df":5,"docs":{"23":{"tf":1.0},"24":{"tf":1.0},"26":{"tf":1.0},"27":{"tf":1.7320508075688772},"28":{"tf":1.4142135623730951}}}}},"r":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"s":{"df":0,"docs":{},"l":{"a":{"df":0,
"docs":{},"t":{"df":1,"docs":{"11":{"tf":1.0}}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}},"i":{"c":{"df":0,"docs":{},"k":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{},"g":{"df":0,"docs":{},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"8":{"tf":1.0}}}}}},"p":{"df":0,"docs":{},"l":{"df":2,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":1.0}}}},"v":{"df":0,"docs":{},"i":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"3":{"tf":1.0}}}},"df":0,"docs":{}}}}},"w":{"df":0,"docs":{},"o":{"df":4,"docs":{"16":{"tf":2.449489742783178},"17":{"tf":1.0},"26":{"tf":1.0},"9":{"tf":1.0}}}}},"u":{"6":{"4":{"df":0,"docs":{},"x":{"2":{"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"8":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"n":{"d":{"df":2,"docs":{"26":{"tf":1.0},"27":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"df":1,"docs":{"3":{"tf":1.0}}}}}}}},"i":{"df":0,"docs":{},"t":{"df":2,"docs":{"20":{"tf":1.0},"8":{"tf":1.0}}}},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":2,"docs":{"10":{"tf":1.0},"8":{"tf":1.0}}}}}},"p":{"c":{"df":0,"docs":{},"k":{"df":0,"docs":{},"l":{"df":0,"docs":{},"p":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"s":{"a":{"df":0,"docs":{},"f":{"df":2,"docs":{"15":{"tf":1.0},"23":{"tf":1.0}}}},"df":0,"docs":{}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"p":{"df":3,"docs":{"21":{"tf":1.4142135623730951},"25":{"tf":1.0},"8":{"tf":1.0}}},"s":{"df":12,"docs":{"10":{"tf":2.23606797749979},"11":{"tf":2.23606797749979},"15":{"tf":1.4142135623730951},"17":{"tf":1.7320508075688772},"20":{"tf":1.4142135623730951},"21":{"tf":2.23606797749979},"22":{"tf":1.4142135623730951},"23":{"tf":1.0},"25":{"tf":1.0},"3":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":2.0}}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":2,"docs":{"23":{"tf":1.0},"26":{"tf":1.0}}}}}},"v":{"7":{"df":1,"docs":{"10":{"tf":1.0}}},"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"g":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"23":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}}},"u":{"df":2,"docs":{"17":{"tf":1.4142135623730951},"26":{"tf":1.0}}}},"r":{"df":0,"docs":{},"i":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"9":{"tf":1.0}}}},"df":0,"docs":{}},"df":1,"docs":{"25":{"tf":1.0}},"o":{"df":0,"docs":{},"u":{"df":4,"docs":{"20":{"tf":1.0},"21":{"tf":1.0},"28":{"tf":1.0},"8":{"tf":1.0}}}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":8,"docs":{"10":{"tf":1.0},"15":{"tf":1.4142135623730951},"16":{"tf":2.6457513110645907},"17":{"tf":2.6457513110645907},"25":{"tf":1.0},"5":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"17":{"tf":1.0}}},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"15":{"tf":1.0}}}}}},"t":{"df":0,"docs":{},"i":{"c":{"df":2,"docs":{"16":{"tf":1.7320508075688772},"1
7":{"tf":2.0}}},"df":0,"docs":{}}}}},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"w":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}}}},"s":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}}}}},"w":{"a":{"df":0,"docs":{},"y":{"df":2,"docs":{"16":{"tf":1.0},"9":{"tf":1.0}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":2,"docs":{"15":{"tf":1.0},"20":{"tf":1.0}}}}},"i":{"d":{"df":0,"docs":{},"e":{"df":1,"docs":{"17":{"tf":1.0}}},"t":{"df":0,"docs":{},"h":{"df":1,"docs":{"16":{"tf":1.7320508075688772}}}}},"df":0,"docs":{},"n":{"d":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":1,"docs":{"11":{"tf":1.0}}}}},"df":0,"docs":{}}},"o":{"df":0,"docs":{},"k":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"21":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"k":{"df":3,"docs":{"11":{"tf":1.0},"17":{"tf":1.0},"4":{"tf":1.0}},"l":{"df":0,"docs":{},"o":{"a":{"d":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}}}}}},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":3,"docs":{"15":{"tf":1.0},"21":{"tf":1.0},"3":{"tf":1.0}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"25":{"tf":1.0}}}}}}},"o":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}}}}}}},"x":{"8":{"6":{"_":{"6":{"4":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"0":{",":{"df":0,"docs":{},"x":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"1":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"1":{",":{"df":0,"docs":{},"x":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"2":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"2":{",":{"df":0,"docs":{},"x":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"0":{"df":1,"docs":{"22":{"tf":1.7320508075688772}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}},"y":{"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"df":0,"docs":{},"f":{"df":1,"docs":{"17":{"tf":1.0}}}}}}}}}}}},"breadcrumbs":{"root":{"0":{",":{"3":{"8":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"7":{"0":{"df":1,"docs":{"22":{"tf":1.0}}},"6":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},".":{".":{"df":0,"docs":{},"x":{".":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"(":{")":{")":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"p":{"_":{"b":{"df":0,"docs":{},"y":{"(":{"4":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}},"df":0,"docs":{}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}},"df":0,"docs":{}}},"0":{"2":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":2,"docs":{"16":{"tf":2.449489742783178},"17":{"tf":2.6457513110645907}}},"1":{",":{"5":{"2":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},".":{"5":{"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}},"0":{"0":{",":{"0":{"0":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":1,"docs":{"21":{"tf":1.0}}},"2":{",":{"5":{"0":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs
":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":1,"docs":{"16":{"tf":2.8284271247461903}}},"2":{",":{"3":{"5":{"6":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},".":{"2":{"8":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"o":{"df":1,"docs":{"22":{"tf":1.0}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"7":{"df":0,"docs":{},"x":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}},"9":{"2":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":1,"docs":{"16":{"tf":1.4142135623730951}}},"3":{".":{"5":{"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}},"df":2,"docs":{"10":{"tf":1.0},"16":{"tf":1.4142135623730951}}},"4":{"df":2,"docs":{"16":{"tf":1.7320508075688772},"17":{"tf":1.4142135623730951}}},"5":{",":{"0":{"6":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"6":{"df":1,"docs":{"16":{"tf":1.0}}},"7":{"df":1,"docs":{"16":{"tf":1.4142135623730951}}},"9":{"3":{",":{"4":{"8":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"4":{",":{"1":{"8":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"_":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"k":{"df":1,"docs":{"15":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}},"a":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"7":{"5":{"df":1,"docs":{"11":{"tf":1.0}}},"df":0,"docs":{}},"b":{"df":0,"docs":{},"s":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"25":{"tf":1.0}}}}}}}},"c":{"c":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"23":{"tf":1.0}}}}},"u":{"df":0,"docs":{},"r":{"df":2,"docs":{"18":{"tf":1.0},"27":{"tf":1.0}}}}},"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"l":{"df":4,"docs":{"17":{"tf":1.0},"25":{"tf":1.7320508075688772},"26":{"tf":1.0},"27":{"tf":1.0}}}},"df":0,"docs":{}}}},"d":{"d":{"df":1,"docs":{"7":{"tf":1.4142135623730951}},"i":{"df":0,"docs":{},"t":{"df":2,"docs":{"16":{"tf":1.4142135623730951},"17":{"tf":1.0}}}},"p":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"v":{"a":{"df":0,"docs":{},"n":{"c":{"df":2,"docs":{"10":{"tf":1.0},"27":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"i":{"c":{"df":1,"docs":{"18":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"9":{"tf":1.0}}}},"df":0,"docs":{}}}},"l":{"df":0,"docs":{},"g":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"h":{"df":0,"docs":{},"m":{"df":2,"docs":{"17":{"tf":1.0},"3":{"tf":1.0}}}}}}}}},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":2,"docs":{"15":{"tf":2.0},"25":{"tf":1.4142135623730951}}}}},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":2,"docs":{"8":{"tf":1.0},"9":{"tf":1.0}}}}},"w":{"a":{"df":0,"docs":{},"y":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{}}},"m":{"d":{"'":{"df":1,"docs":{"25":{"tf":1.0}}},"6":{"4":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}},"df":1,"docs":{"25":{"tf":1.4142135623730951}}},"df":0,"docs":{}},"n":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"y":{"df":0,"docs":{},"s":{"df":0,"docs":{
},"i":{"df":1,"docs":{"24":{"tf":1.4142135623730951}}}},"z":{"df":2,"docs":{"27":{"tf":1.4142135623730951},"28":{"tf":1.7320508075688772}}}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"o":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"p":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":1,"docs":{"8":{"tf":1.0}},"i":{"df":1,"docs":{"3":{"tf":1.0}}}},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"x":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":1,"docs":{"6":{"tf":1.4142135623730951}}}}}}}}},"r":{"c":{"df":0,"docs":{},"h":{"df":2,"docs":{"25":{"tf":1.4142135623730951},"28":{"tf":1.0}},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":5,"docs":{"17":{"tf":1.4142135623730951},"25":{"tf":1.0},"27":{"tf":1.4142135623730951},"28":{"tf":1.4142135623730951},"8":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{}}}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"s":{"df":1,"docs":{"25":{"tf":1.0}}}},"m":{"df":2,"docs":{"10":{"tf":1.0},"25":{"tf":1.0}}},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"k":{"df":1,"docs":{"25":{"tf":1.0}}},"s":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}},"r":{"df":0,"docs":{},"t":{"!":{"(":{"df":0,"docs":{},"x":{".":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":1,"docs":{"15":{"tf":1.7320508075688772}}}}},"o":{"c":{"df":0,"docs":{},"i":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"t":{"&":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"df":0,"docs":{},"p":{"df":0,"docs":{},"t":{"df":1,"docs":{"3":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"i":{"b":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"12":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}}}},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"21":{"tf":1.0}}}},"df":0,"docs":{}}},"o":{"df":0,"docs":{},"i":{"d":{"df":1,"docs":{"3":{"tf":1.0}}},"df":0,"docs":{}}},"x":{"2":{"df":1,"docs":{"10":{"tf":1.0}}},"df":0,"docs":{}}},"w":{"a":{"df":0,"docs":{},"r":{"df":2,"docs":{"15":{"tf":1.0},"3":{"tf":1.0}}}},"df":0,"docs":{}}},"b":{"a":{"c":{"df":0,"docs":{},"k":{"df":1,"docs":{"18":{"tf":1.0}}}},"df":0,"docs":{},"n":{"d":{"df":0,"docs":{},"w":{"df":0,"docs":{},"i":{"d":{"df":0,"docs":{},"t":{"df":0,"docs":{},"h":{"df":1,"docs":{"26":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.0}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":1,"docs":{"21":{"tf":1.0}}}}},"df":2,"docs":{"22":{"tf":1.0},"28":{"tf":1.0}},"h":{"a":{"df":0,"docs":{},"v":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"8":{"tf":1.0}}},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"22":{"tf":1.0}}}}}}}},"df":0,"docs":{}},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":1,"docs":{"10":{"tf":1.0}}}}},"n":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"m":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"k":{"df":2,"docs":{"21":{"tf":1.4142135623730951},"22":{"tf":1.0}}}}},"df":0,"docs":{}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":1,"docs":{"9":{"tf":1.0}
}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"3":{"tf":1.0}}}},"t":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"25":{"tf":1.4142135623730951},"26":{"tf":1.0}}}}}}},"i":{"df":0,"docs":{},"g":{"df":1,"docs":{"15":{"tf":1.0}}},"n":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"21":{"tf":1.0}}}}},"df":0,"docs":{}},"r":{"d":{"'":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}}},"o":{"df":0,"docs":{},"o":{"df":0,"docs":{},"k":{"df":3,"docs":{"18":{"tf":1.0},"20":{"tf":1.0},"3":{"tf":1.0}}}},"t":{"df":0,"docs":{},"h":{"df":1,"docs":{"10":{"tf":1.0}}},"t":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"k":{"df":1,"docs":{"27":{"tf":1.0}}}},"df":0,"docs":{}}}}}}},"u":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"15":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}},"r":{"a":{"df":0,"docs":{},"n":{"c":{"df":0,"docs":{},"h":{"df":3,"docs":{"21":{"tf":1.0},"25":{"tf":1.0},"26":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"u":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"d":{"df":5,"docs":{"11":{"tf":1.4142135623730951},"15":{"tf":1.4142135623730951},"18":{"tf":1.0},"21":{"tf":1.4142135623730951},"8":{"tf":1.0}}},"df":0,"docs":{}}},"l":{"df":0,"docs":{},"k":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"c":{"/":{"c":{"df":1,"docs":{"23":{"tf":1.0}}},"df":0,"docs":{}},"a":{"c":{"df":0,"docs":{},"h":{"df":2,"docs":{"21":{"tf":1.0},"23":{"tf":1.0}},"e":{"df":0,"docs":{},"g":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"23":{"tf":1.0}}},"df":0,"docs":{}}}}}}}},"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":2,"docs":{"21":{"tf":1.7320508075688772},"23":{"tf":2.0}},"g":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"23":{"tf":1.0}}},"df":0,"docs":{}}}}}}},"p":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"g":{"df":0,"docs":{},"o":{".":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":0,"docs":{},"l":{"df":1,"docs":{"18":{"tf":1.0}}}}}}},"df":1,"docs":{"21":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.0}}}},"u":{"df":0,"docs":{},"s":{"df":2,"docs":{"11":{"tf":1.4142135623730951},"27":{"tf":1.0}}}}},"df":3,"docs":{"10":{"tf":2.23606797749979},"11":{"tf":1.4142135623730951},"25":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":7,"docs":{"10":{"tf":1.0},"11":{"tf":1.0},"16":{"tf":1.0},"25":{"tf":1.4142135623730951},"27":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.0}}}}},"df":0,"docs":{}}}},"f":{"df":0,"docs":{},"g":{"(":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"_":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"11":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}},"h":{"a":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"26":{"tf":1.0}}}},"n":{"df":0,"docs":{},"g":{"df":1,"docs":{"18":{"tf":1.0}}}},"p":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"18":{"tf":1.0},"4":{"tf":1.0}}}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"k":{"df":2,"docs":{"11":{"tf":1.0},"15":{"tf":2.23606797749979}}}},"df":0,"docs":{}}},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"s":{"df
":0,"docs":{},"e":{"df":1,"docs":{"28":{"tf":1.0}}}}}},"o":{"d":{"df":0,"docs":{},"e":{"df":12,"docs":{"11":{"tf":2.0},"18":{"tf":1.4142135623730951},"20":{"tf":1.0},"22":{"tf":1.0},"23":{"tf":1.0},"24":{"tf":1.4142135623730951},"25":{"tf":2.0},"27":{"tf":1.4142135623730951},"28":{"tf":1.4142135623730951},"3":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.4142135623730951}}}},"df":0,"docs":{},"m":{"b":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"16":{"tf":1.0}}}}},"df":0,"docs":{},"m":{"a":{"df":1,"docs":{"10":{"tf":1.0}},"n":{"d":{"df":2,"docs":{"20":{"tf":1.0},"21":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}},"p":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"s":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"17":{"tf":1.0}}}}}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":5,"docs":{"10":{"tf":1.0},"11":{"tf":1.7320508075688772},"25":{"tf":1.4142135623730951},"8":{"tf":1.0},"9":{"tf":1.7320508075688772}},"e":{"df":0,"docs":{},"r":{"'":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}}}}},"u":{"df":0,"docs":{},"t":{"df":3,"docs":{"11":{"tf":1.0},"17":{"tf":1.0},"28":{"tf":1.0}}}}}},"n":{"d":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":1,"docs":{"25":{"tf":1.0}}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"15":{"tf":1.4142135623730951}}}}}}},"s":{"df":0,"docs":{},"i":{"d":{"df":2,"docs":{"17":{"tf":1.0},"26":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"18":{"tf":1.0}}}}},"t":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"17":{"tf":1.0}}}}},"df":0,"docs":{}}},"t":{"a":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"4":{"tf":1.0}}}}},"df":0,"docs":{}}},"r":{"df":0,"docs":{},"r":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"p":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"18":{"tf":1.0}}},"df":0,"docs":{}}}}}}}},"u":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"21":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"s":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"p":{"df":0,"docs":{},"u":{"'":{"df":1,"docs":{"26":{"tf":1.0}}},"=":{"<":{"c":{"df":0,"docs":{},"p":{"df":0,"docs":{},"u":{"df":1,"docs":{"11":{"tf":1.0}}}}},"df":0,"docs":{}},"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"x":{"df":1,"docs":{"11":{"tf":1.0}}}}}}}},"df":0,"docs":{}},"df":7,"docs":{"10":{"tf":1.7320508075688772},"11":{"tf":3.0},"21":{"tf":1.4142135623730951},"22":{"tf":1.0},"25":{"tf":2.6457513110645907},"26":{"tf":1.4142135623730951},"27":{"tf":1.4142135623730951}}}},"r":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":1,"docs":{"9":{"tf":1.0}}}}},"df":0,"docs":{},"o":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"11":{"tf":1.0}}}}}},"y":{"c":{"df":0,"docs":{},"l":{"df":1,"docs":{"21":{"tf":1.0}}}},"df":0,"docs":{}}},"d":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"w":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"8":{"tf":1.0}}}}}},"t":{"a":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"e":{"b":{"df":0,"docs":{},"u":{"df":0,"docs":{},"g":{"df":4,"docs":{"15":{"tf":1.7320508075688772},"18":{"tf":1.4142135623730951},"21":{"tf":1.0},"22":{"tf":1.0}},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"f":{"df":0,"docs":{},"o":{"df":1,"docs":{"21":
{"tf":1.0}}}}}}}}},"d":{"df":0,"docs":{},"i":{"c":{"df":1,"docs":{"18":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"f":{"a":{"df":0,"docs":{},"u":{"df":0,"docs":{},"l":{"df":0,"docs":{},"t":{"df":5,"docs":{"15":{"tf":1.0},"18":{"tf":1.0},"21":{"tf":1.0},"22":{"tf":1.4142135623730951},"8":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"8":{"tf":1.0}}}}}},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"d":{"df":3,"docs":{"10":{"tf":1.0},"15":{"tf":1.0},"17":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}},"t":{"a":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":2,"docs":{"25":{"tf":1.0},"27":{"tf":1.0}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"14":{"tf":1.4142135623730951}}}},"df":0,"docs":{},"r":{"df":0,"docs":{},"m":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":2,"docs":{"26":{"tf":1.0},"8":{"tf":1.0}}}}}}}}},"i":{"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}},"i":{"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"l":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"s":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"18":{"tf":1.0}}}},"d":{"df":0,"docs":{},"v":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"27":{"tf":1.0}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.7320508075688772}}}},"df":0,"docs":{}}}}}},"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"v":{"df":1,"docs":{"3":{"tf":1.4142135623730951}}}}},"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"y":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}}},"t":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"b":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"11":{"tf":1.0}}}}},"df":0,"docs":{}}}}}},"o":{"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"3":{"tf":1.0}}}}}}}},"df":1,"docs":{"17":{"tf":1.0}},"n":{"'":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{}}},"u":{"df":0,"docs":{},"e":{"df":1,"docs":{"25":{"tf":1.0}}},"r":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"df":0,"docs":{},"e":{"a":{"c":{"df":0,"docs":{},"h":{"df":1,"docs":{"16":{"tf":1.0}}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"9":{"tf":1.0}}}}},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"21":{"tf":1.0}}}}}}},"df":1,"docs":{"21":{"tf":1.0}},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"16":{"tf":1.4142135623730951}}}}}}}},"m":{"df":0,"docs":{},"p":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"n":{"a":{"b":{"df":0,"docs":{},"l":{"df":5,"docs":{"10":{"tf":2.6457513110645907},"21":{"tf":1.4142135623730951},"22":{"tf":1.0},"8":{"tf":2.0},"9":{"tf":1.0}}}},"df":0,"docs":{}},"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}}}}}}},"d":{"df":2,"docs":{"17":{"tf":1.0},"26":{"tf":1.0}}},"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"g":{"df":0,"docs":{},"h":{"df":1,"docs":{"15":{"tf":1.0}}}}}},"v":{"df":0,"docs":{},"i":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"
df":0,"docs":{},"n":{"df":1,"docs":{"9":{"tf":1.0}}}}}}}},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"10":{"tf":1.0}},"t":{"df":2,"docs":{"20":{"tf":1.0},"21":{"tf":1.0}}}}}},"x":{"a":{"df":0,"docs":{},"m":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":8,"docs":{"10":{"tf":1.7320508075688772},"11":{"tf":1.4142135623730951},"16":{"tf":1.4142135623730951},"17":{"tf":1.0},"25":{"tf":1.4142135623730951},"26":{"tf":1.0},"3":{"tf":1.0},"8":{"tf":1.0}}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":4,"docs":{"18":{"tf":1.0},"21":{"tf":1.4142135623730951},"22":{"tf":1.7320508075688772},"26":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}},"p":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"27":{"tf":1.0}}}}}}},"df":0,"docs":{}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"s":{"df":3,"docs":{"10":{"tf":1.7320508075688772},"25":{"tf":1.0},"9":{"tf":1.4142135623730951}}}}}}},"y":{"df":1,"docs":{"22":{"tf":1.0}}}},"f":{"3":{"2":{"df":1,"docs":{"17":{"tf":2.449489742783178}},"x":{"4":{":":{":":{"df":0,"docs":{},"f":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"e":{"_":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"d":{"(":{"&":{"df":0,"docs":{},"x":{"[":{"df":0,"docs":{},"i":{".":{".":{"]":{")":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{}}}}},"s":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"t":{"(":{"0":{"df":1,"docs":{"17":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}},"a":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"df":0,"docs":{},"l":{"df":0,"docs":{},"s":{"df":1,"docs":{"15":{"tf":1.4142135623730951}}}},"m":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":2,"docs":{"11":{"tf":1.0},"25":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"(":{"df":0,"docs":{},"x":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}}},"df":2,"docs":{"17":{"tf":1.0},"3":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}}}},"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":8,"docs":{"10":{"tf":2.8284271247461903},"11":{"tf":1.7320508075688772},"12":{"tf":1.0},"13":{"tf":1.0},"14":{"tf":1.7320508075688772},"20":{"tf":1.0},"8":{"tf":1.7320508075688772},"9":{"tf":1.0}},"e":{"=":{"+":{"df":0,"docs":{},"v":{"7":{",":{"+":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"10":{"tf":1.0}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}},"<":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":
1,"docs":{"10":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"df":0,"docs":{},"s":{"=":{"+":{"a":{"df":0,"docs":{},"v":{"df":0,"docs":{},"x":{"2":{",":{"+":{"df":0,"docs":{},"f":{"df":0,"docs":{},"m":{"a":{"df":1,"docs":{"10":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"df":1,"docs":{"10":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"3":{",":{"+":{"a":{"df":0,"docs":{},"v":{"df":0,"docs":{},"x":{"df":1,"docs":{"10":{"tf":1.0}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}}}}},"df":0,"docs":{}},"i":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":1,"docs":{"27":{"tf":1.0}}}},"n":{"d":{"df":1,"docs":{"23":{"tf":1.0}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"s":{"df":0,"docs":{},"h":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"l":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"9":{"tf":1.4142135623730951}}}},"df":0,"docs":{},"o":{"a":{"df":0,"docs":{},"t":{"df":4,"docs":{"4":{"tf":1.7320508075688772},"5":{"tf":1.0},"6":{"tf":1.0},"7":{"tf":1.0}}}},"df":0,"docs":{}}},"m":{"a":{"df":1,"docs":{"10":{"tf":1.4142135623730951}}},"df":0,"docs":{}},"n":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}},"o":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":2,"docs":{"17":{"tf":1.0},"25":{"tf":1.4142135623730951}}}}}},"u":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}}},"r":{"a":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"df":0,"docs":{},"w":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"k":{"df":1,"docs":{"20":{"tf":1.0}}}}}}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"e":{"df":1,"docs":{"28":{"tf":1.0}}}}},"u":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":1,"docs":{"20":{"tf":1.0}}}},"n":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":5,"docs":{"15":{"tf":1.0},"17":{"tf":1.7320508075688772},"22":{"tf":1.4142135623730951},"25":{"tf":1.4142135623730951},"6":{"tf":1.4142135623730951}}}}}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"e":{"df":1,"docs":{"7":{"tf":1.4142135623730951}}}}}},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":7,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":2.0},"18":{"tf":1.0},"25":{"tf":1.4142135623730951},"27":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.4142135623730951}}}}},"t":{"df":1,"docs":{"22":{"tf":1.0}}}},"i":{"df":0,"docs":{},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"r":{"a":{"df":0,"docs":{},"p":{"df":0,"docs":{},"h":{"=":{"d":{"df":0,"docs":{},"w":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"f":{"df":1,"docs":{"21":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}},"df":0,"docs":{},"l":{"b":{"df":0,"docs":{},"r":{"df":1,"docs":{"21":{"tf":1.0}}}},"df":0,"docs":{}}},"df":1,"docs":{"23":{"tf":1.0}}}}},"df":0,"docs":{}},"u":{"a":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":1,"docs":{"8":{"tf":1.0}}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}},"h":{"a":{"df":0,"docs":{},"p":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":2,"docs":{"17":{"tf":1.0},"26":{"tf":1.4142135623730951}}}}}},"r":{"d":{"df":0,"docs":{},"w":{"a":{"df":0,"docs":{},"r":{"df":2,"docs":{"20":{"tf":1.0},"21":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"w":{"df":0,"
docs":{},"e":{"df":0,"docs":{},"l":{"df":2,"docs":{"21":{"tf":1.0},"25":{"tf":1.0}}}}}},"v":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}}}}},"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"p":{"df":1,"docs":{"23":{"tf":1.0}}},"r":{"d":{"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"l":{"df":0,"docs":{},"p":{"df":3,"docs":{"21":{"tf":1.0},"23":{"tf":1.0},"3":{"tf":1.0}}}},"r":{"df":0,"docs":{},"e":{"df":2,"docs":{"22":{"tf":1.0},"25":{"tf":1.0}}}}},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"r":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}},"g":{"df":0,"docs":{},"h":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"h":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}}}}}},"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"2":{"tf":1.4142135623730951}}}}}}}},"o":{"df":0,"docs":{},"l":{"d":{"df":1,"docs":{"15":{"tf":1.0}}},"df":0,"docs":{}},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"z":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":2,"docs":{"16":{"tf":2.0},"17":{"tf":2.6457513110645907}}}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"14":{"tf":1.4142135623730951}}}},"t":{"df":0,"docs":{},"s":{"df":0,"docs":{},"p":{"df":0,"docs":{},"o":{"df":0,"docs":{},"t":{"df":1,"docs":{"18":{"tf":1.4142135623730951}}}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}}}}}},"i":{".":{"df":2,"docs":{"21":{"tf":1.0},"27":{"tf":1.4142135623730951}}},"a":{"c":{"a":{"df":1,"docs":{"28":{"tf":1.7320508075688772}}},"df":0,"docs":{}},"df":0,"docs":{}},"d":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":3,"docs":{"11":{"tf":1.0},"18":{"tf":1.0},"22":{"tf":1.0}}}}}}}},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":1,"docs":{"3":{"tf":1.0}}}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":1,"docs":{"15":{"tf":1.0}}}},"m":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"d":{"df":0,"docs":{},"i":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}},"p":{"a":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"18":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"v":{"df":2,"docs":{"18":{"tf":1.0},"25":{"tf":1.0}}}}}}},"n":{"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"d":{"df":2,"docs":{"10":{"tf":1.0},"15":{"tf":1.0}}},"df":0,"docs":{}}}}},"i":{"df":0,"docs":{},"v":{"df":0,"docs":{},"i":{"d":{"df":0,"docs":{},"u":{"df":2,"docs":{"10":{"tf":1.0},"11":{"tf":1.0}}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"i":{"df":1,"docs":{"27":{"tf":1.0}}}},"df":0,"docs":{}}}}},"f":{"df":0,"docs":{},"o":{"df":2,"docs":{"18":{"tf":1.0},"21":{"tf":1.0}},"r":{"df":0,"docs":{},"m":{"df":4,"docs":{"16":{"tf":1.0},"18":{"tf":1.0},"27":{"tf":1.0},"4":{"tf":1.0}}}}}},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":1,"docs":{"23":{"tf":1.0}}}}},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"13":{"tf":1.4142135623730951}}}}},"n":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs
":{"17":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"i":{"d":{"df":1,"docs":{"17":{"tf":1.0}}},"df":0,"docs":{}},"t":{"df":0,"docs":{},"e":{"a":{"d":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"r":{"df":0,"docs":{},"u":{"c":{"df":0,"docs":{},"t":{"df":7,"docs":{"10":{"tf":1.7320508075688772},"22":{"tf":1.7320508075688772},"25":{"tf":2.23606797749979},"26":{"tf":2.0},"27":{"tf":1.7320508075688772},"8":{"tf":1.4142135623730951},"9":{"tf":1.0}}}},"df":0,"docs":{}}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"g":{"df":0,"docs":{},"r":{"df":1,"docs":{"20":{"tf":1.0}}}},"l":{"'":{"df":2,"docs":{"25":{"tf":1.0},"28":{"tf":1.7320508075688772}}},"df":3,"docs":{"21":{"tf":1.0},"22":{"tf":1.7320508075688772},"28":{"tf":1.0}}},"r":{"df":0,"docs":{},"f":{"a":{"c":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"l":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"v":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}}},"n":{"df":3,"docs":{"25":{"tf":1.0},"26":{"tf":1.4142135623730951},"27":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"o":{"d":{"df":0,"docs":{},"u":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"0":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"s":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":1,"docs":{"21":{"tf":1.0}}}},"s":{"df":0,"docs":{},"u":{"df":2,"docs":{"21":{"tf":1.0},"27":{"tf":1.0}}}}}},"j":{"a":{"df":0,"docs":{},"g":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"k":{"8":{"'":{"df":1,"docs":{"25":{"tf":1.0}}},"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"'":{"df":1,"docs":{"20":{"tf":1.0}}},"df":1,"docs":{"28":{"tf":1.0}}}}}},"y":{"df":1,"docs":{"22":{"tf":1.0}}}},"i":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}}},"n":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":1,"docs":{"27":{"tf":1.0}},"l":{"df":0,"docs":{},"e":{"d":{"df":0,"docs":{},"g":{"df":1,"docs":{"27":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}}}}}},"l":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"df":2,"docs":{"16":{"tf":1.0},"17":{"tf":1.7320508075688772}}}},"r":{"df":0,"docs":{},"g":{"df":1,"docs":{"17":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"15":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"21":{"tf":1.0}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"c":{"df":1,"docs":{"27":{"tf":1.0}}},"df":0,"docs":{}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}}}}},"df":0,"docs":{},"i":{"b":{"c":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"5":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}},"df":0,"docs":{},"m":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}}}}},"n":{"df":0,"docs":{},"e":{"df":2,"docs":{"18":{"tf":1.0},"21":{"tf":1.0}}},"k":{"df":1,"docs":{"18":{"tf":1.0}}},"u":{"df":0,"docs":{},"x":{"df":2,"docs":{"19":{"tf":1.4142135623730951},"20":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"t":{"df":4,"docs":{"10":{"tf":1.7320508075688772},"11":{"tf":1.4142135623730951},"21":{"tf":1.4142135623730951},"8":{"tf":1.0}}}}},"l":{"df":0,"docs":{},"v":{"df":0,"docs":{},"m":{"df":1,"docs":{"29":{"tf":1.4142135623730951}}}}},"o":{"a":{"d":{"/":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"df":1,"docs":{"15":{"tf"
:1.0}}},"df":0,"docs":{}},"c":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"11":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{},"n":{"df":0,"docs":{},"g":{"df":1,"docs":{"26":{"tf":1.0}}}},"o":{"df":0,"docs":{},"k":{"df":2,"docs":{"20":{"tf":1.0},"25":{"tf":1.4142135623730951}}},"p":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}}},"m":{"a":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":3,"docs":{"17":{"tf":1.0},"24":{"tf":1.4142135623730951},"27":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"i":{"df":2,"docs":{"27":{"tf":1.4142135623730951},"3":{"tf":1.0}}},"u":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}},"s":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"f":{"df":1,"docs":{"23":{"tf":1.0}}}}}},"t":{"df":0,"docs":{},"h":{"df":4,"docs":{"4":{"tf":1.4142135623730951},"5":{"tf":1.7320508075688772},"6":{"tf":1.0},"7":{"tf":1.0}}}},"x":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"b":{"df":1,"docs":{"21":{"tf":1.0}}},"c":{"a":{"df":1,"docs":{"29":{"tf":1.4142135623730951}}},"df":0,"docs":{}},"df":1,"docs":{"22":{"tf":1.4142135623730951}},"e":{"a":{"df":0,"docs":{},"n":{"df":2,"docs":{"22":{"tf":1.0},"26":{"tf":1.0}},"t":{"df":1,"docs":{"11":{"tf":1.0}}}}},"df":0,"docs":{},"m":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":2,"docs":{"23":{"tf":1.0},"26":{"tf":1.4142135623730951}}}}}},"t":{"df":0,"docs":{},"h":{"df":0,"docs":{},"o":{"d":{"df":1,"docs":{"15":{"tf":1.0}}},"df":0,"docs":{}}}}},"i":{"c":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"a":{"df":0,"docs":{},"r":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":2,"docs":{"25":{"tf":2.0},"27":{"tf":1.0}}}}}},"df":0,"docs":{}}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}}},"df":0,"docs":{},"p":{"df":1,"docs":{"25":{"tf":1.0}}},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"21":{"tf":1.0}}}}},"o":{"d":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}},"l":{"df":1,"docs":{"11":{"tf":1.7320508075688772}}},"r":{"df":0,"docs":{},"n":{"df":2,"docs":{"25":{"tf":1.4142135623730951},"26":{"tf":1.0}}}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"20":{"tf":1.0}}}}}}},"r":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.0}}}},"v":{"a":{"df":0,"docs":{},"p":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.0}}},"h":{"df":0,"docs":{},"l":{"df":0,"docs":{},"p":{"df":1,"docs":{"22":{"tf":1.0}}}}}}},"s":{"df":0,"docs":{},"v":{"c":{"df":1,"docs":{"11":{"tf":1.0}}},"df":0,"docs":{}}},"u":{"c":{"df":0,"docs":{},"h":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{},"l":{"df":0,"docs":{},"p":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"7":{"tf":1.4142135623730951}}}}}}}},"t":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}},"n":{"]":{">":{":":{":":{"df":0,"docs":{},"f":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"e":{"_":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},
"df":0,"docs":{}}}}},"w":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"_":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"e":{"_":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":0,"docs":{},"e":{"d":{"(":{"&":{"df":0,"docs":{},"m":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"15":{"tf":1.0}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"v":{"df":1,"docs":{"11":{"tf":1.0}}}}}},"b":{"df":0,"docs":{},"o":{"d":{"df":0,"docs":{},"i":{"df":1,"docs":{"22":{"tf":1.7320508075688772}}},"y":{"_":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"b":{":":{":":{"df":0,"docs":{},"r":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"df":1,"docs":{"22":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"d":{":":{":":{"a":{"d":{"df":0,"docs":{},"v":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}}},"df":0,"docs":{}}},"df":0,"docs":{}}},"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":0,"docs":{},"s":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"df":1,"docs":{"15":{"tf":1.0}},"e":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"26":{"tf":1.0}}}},"c":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"10":{"tf":1.0}}}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"e":{"d":{"df":1,"docs":{"10":{"tf":1.0}}},"df":0,"docs":{}},"g":{"a":{"df":0,"docs":{},"t":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"10":{"tf":1.0}}}},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"11":{"tf":1.0}}}}},"w":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"21":{"tf":1.0}}}}},"x":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}},"o":{"d":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}},"df":0,"docs":{},"p":{"df":1,"docs":{"25":{"tf":1.4142135623730951}}},"t":{"df":0,"docs":{},"e":{"df":1,"docs":{"10":{"tf":1.0}}}},"w":{"a":{"d":{"a":{"df":0,"docs":{},"y":{"df":1,"docs":{"23":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{},"h":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"26":{"tf":1.0}}}}}}},"u":{"df":0,"docs":{},"m":{"b":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"16":{"tf":1.0},"4":{"tf":1.0}}}}},"df":0,"docs":{}}}},"o":{"df":0,"docs":{},"f":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"28":{"tf":1.0}}}}}},"n":{"df":6,"docs":{"10":{"tf":1.0},"16":{"tf":1.0},"17":{"tf":1.0},"26":{"tf":1.0},"3":{"tf":1.0},"9":{"tf":1.0}}},"p":{"df":1,"docs":{"27":{"tf":1.0}},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}},"r":{"df":3,"docs":{"16":{"tf":2.449489742783178},"17":{"tf":3.1622776601683795},"26":{"tf":1.4142135623730951}}}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":5,"docs":{"11":{"tf":1.7320508075688772},"18":{"tf":1.0},"21":{"tf":1.0},"25":{"tf":1.0},"26":{"tf":1.0}}},"o":{"df":0,"docs":{},"n":{"df":3,"docs":{"15":{"tf":1.4142135623730951},"21":{"tf":1.0},"9":{"tf":1.0}}}}}
}},"r":{"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"18":{"tf":1.4142135623730951},"26":{"tf":1.0}}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"16":{"tf":1.0}}}}}}},"u":{"df":0,"docs":{},"t":{"df":2,"docs":{"26":{"tf":1.0},"27":{"tf":1.0}},"p":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":2,"docs":{"22":{"tf":1.4142135623730951},"27":{"tf":1.0}}}}},"s":{"df":0,"docs":{},"i":{"d":{"df":1,"docs":{"20":{"tf":1.0}}},"df":0,"docs":{}}}}},"v":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"p":{"a":{"c":{"df":0,"docs":{},"k":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"11":{"tf":1.0}}}},"df":1,"docs":{"15":{"tf":1.0}},"e":{"d":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"d":{"df":1,"docs":{"3":{"tf":1.7320508075688772}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}},"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"21":{"tf":1.4142135623730951}},"i":{"c":{"df":0,"docs":{},"u":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"9":{"tf":1.0}}}}},"c":{"df":1,"docs":{"11":{"tf":1.0}}},"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"k":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{},"r":{"c":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"f":{".":{"d":{"a":{"df":0,"docs":{},"t":{"a":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":3,"docs":{"20":{"tf":2.0},"21":{"tf":2.449489742783178},"22":{"tf":2.23606797749979}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"m":{"df":15,"docs":{"15":{"tf":1.7320508075688772},"16":{"tf":1.0},"17":{"tf":2.0},"18":{"tf":2.0},"19":{"tf":1.7320508075688772},"20":{"tf":2.0},"21":{"tf":1.7320508075688772},"22":{"tf":1.4142135623730951},"23":{"tf":1.4142135623730951},"24":{"tf":1.0},"25":{"tf":2.23606797749979},"26":{"tf":1.4142135623730951},"27":{"tf":1.0},"28":{"tf":1.4142135623730951},"29":{"tf":1.0}}}}}},"t":{"a":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"4":{"tf":1.0}}}}},"df":0,"docs":{}}}},"h":{"df":0,"docs":{},"y":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"c":{"df":1,"docs":{"27":{"tf":1.0}}},"df":0,"docs":{}}}}},"i":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"26":{"tf":1.4142135623730951}}}}}}},"t":{"df":0,"docs":{},"f":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"3":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}}},"l":{"a":{"df":0,"docs":{},"n":{"df":1,"docs":{"11":{"tf":1.0}}},"t":{"df":0,"docs":{},"f":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"m":{"df":1,"docs":{"8":{"tf":1.0}}}}}}}},"df":0,"docs":{}},"o":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":5,"docs":{"27":{"tf":1.0},"4":{"tf":1.7320508075688772},"5":{"tf":1.0},"6":{"tf":1.0},"7":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"t":{"a":{"b":{"df":0,"docs":{},"l":{"df":2,"docs":{"3":{"tf":1.0},"8":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"w":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"20":{"tf":1.0}}}}}},"r":{"a":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"c":{"df":2,"docs":{"18":{"tf":1.0},"3":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"df":0,"docs":{},"e":{"d":{"df":0,"
docs":{},"i":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"26":{"tf":1.0}},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"df":0,"docs":{}}},"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"22":{"tf":1.0}}}},"t":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":2,"docs":{"21":{"tf":1.0},"26":{"tf":1.0}}}}}},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":2,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":1.4142135623730951}}}}},"o":{"c":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":1,"docs":{"8":{"tf":1.0}},"o":{"df":0,"docs":{},"r":{"df":4,"docs":{"16":{"tf":1.0},"21":{"tf":1.0},"26":{"tf":1.0},"8":{"tf":1.0}}}}}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":12,"docs":{"18":{"tf":2.23606797749979},"19":{"tf":1.7320508075688772},"20":{"tf":2.0},"21":{"tf":1.0},"22":{"tf":1.4142135623730951},"23":{"tf":2.0},"24":{"tf":1.0},"25":{"tf":1.0},"26":{"tf":2.0},"27":{"tf":1.0},"28":{"tf":1.0},"29":{"tf":1.0}}}}},"g":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"m":{"'":{"df":3,"docs":{"18":{"tf":1.0},"21":{"tf":1.0},"22":{"tf":1.4142135623730951}}},"df":5,"docs":{"11":{"tf":1.4142135623730951},"18":{"tf":1.4142135623730951},"21":{"tf":1.7320508075688772},"22":{"tf":1.0},"8":{"tf":1.0}},"m":{"df":1,"docs":{"23":{"tf":1.0}}}}},"df":0,"docs":{}}},"j":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"23":{"tf":1.0}}}},"df":0,"docs":{}}},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"11":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"28":{"tf":1.0}}}}},"df":0,"docs":{}}}}}},"v":{"df":0,"docs":{},"i":{"d":{"df":5,"docs":{"10":{"tf":1.0},"18":{"tf":1.0},"22":{"tf":1.0},"27":{"tf":1.0},"3":{"tf":1.0}}},"df":0,"docs":{}}}}}},"r":{"df":0,"docs":{},"e":{")":{"df":0,"docs":{},"r":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"a":{"d":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}},"df":2,"docs":{"15":{"tf":1.0},"25":{"tf":1.0}}},"df":0,"docs":{}},"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"d":{"df":2,"docs":{"18":{"tf":1.0},"21":{"tf":3.0}}},"df":0,"docs":{}}}},"d":{"df":1,"docs":{"22":{"tf":1.0}},"u":{"c":{"df":1,"docs":{"16":{"tf":1.0}},"t":{"df":1,"docs":{"17":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"25":{"tf":1.0},"8":{"tf":1.0}}}}},"g":{"df":0,"docs":{},"i":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"27":{"tf":1.0}}}}}},"l":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"s":{"df":2,"docs":{"15":{"tf":1.0},"21":{"tf":1.7320508075688772}}}},"df":0,"docs":{}}},"p":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}},"l":{"a":{"c":{"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":2,"docs":{"22":{"tf":2.0},"26":{"tf":1.0}}}}},"z":{"df":1,"docs":{"25":{"tf":1.4142135623730951}}}},"q":{"df":0,"docs":{},"u":{"df":0,"docs":{},"i":{"df":0,"docs":{},"r":{"df":3,"docs":{"18":{"tf":1.0},"25":{"tf":1.0},"27":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"18":{"tf":1.0}}},"u":{"df":0,"docs":{},"l":{"df":0,"docs":{},"t":{"'":{"df":1,"docs":{"16":{"tf":1.0}}},"df":2,"docs":{"16":{"tf":1.0},"17":{"tf":1.7320508075688772}}}}}},"t":{"df":1,"docs":{"2
5":{"tf":2.0}},"u":{"df":0,"docs":{},"r":{"df":0,"docs":{},"n":{"df":1,"docs":{"25":{"tf":1.0}}}}}}},"u":{"df":0,"docs":{},"n":{"df":2,"docs":{"11":{"tf":1.4142135623730951},"17":{"tf":1.0}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":2,"docs":{"14":{"tf":1.4142135623730951},"25":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"c":{"df":2,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":1.4142135623730951}}},"df":6,"docs":{"10":{"tf":1.0},"11":{"tf":1.0},"18":{"tf":1.0},"2":{"tf":1.4142135623730951},"8":{"tf":1.0},"9":{"tf":1.0}},"f":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"g":{"df":2,"docs":{"11":{"tf":1.0},"9":{"tf":1.7320508075688772}}}},"df":0,"docs":{}}}}}}},"s":{"a":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"df":2,"docs":{"15":{"tf":1.0},"8":{"tf":1.0}}}},"m":{"df":0,"docs":{},"e":{"df":1,"docs":{"16":{"tf":1.4142135623730951}}},"p":{"df":0,"docs":{},"l":{"df":1,"docs":{"21":{"tf":1.0}}}}}},"c":{"a":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":1,"docs":{"26":{"tf":1.0}}}}}},"df":0,"docs":{}}},"h":{"df":0,"docs":{},"e":{"d":{"df":0,"docs":{},"u":{"df":0,"docs":{},"l":{"df":1,"docs":{"26":{"tf":1.0}}}}},"df":0,"docs":{}}},"o":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":1,"docs":{"20":{"tf":1.0}}}}}},"df":2,"docs":{"15":{"tf":1.4142135623730951},"22":{"tf":1.0}},"e":{"df":0,"docs":{},"e":{"df":2,"docs":{"10":{"tf":1.0},"17":{"tf":1.0}}},"p":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"10":{"tf":1.0}}}},"df":0,"docs":{}},"t":{"df":6,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":1.0},"21":{"tf":1.0},"23":{"tf":1.0},"25":{"tf":1.0},"27":{"tf":1.0}}}},"h":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"5":{"tf":1.4142135623730951}}}}}},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":0,"docs":{},"i":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"c":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}}}},"m":{"d":{"<":{"[":{"df":0,"docs":{},"t":{"df":1,"docs":{"15":{"tf":1.7320508075688772}}}},"df":0,"docs":{}},"df":11,"docs":{"1":{"tf":1.4142135623730951},"10":{"tf":1.0},"16":{"tf":1.0},"18":{"tf":1.0},"2":{"tf":1.4142135623730951},"20":{"tf":1.0},"25":{"tf":1.0},"26":{"tf":1.0},"3":{"tf":1.0},"8":{"tf":1.4142135623730951},"9":{"tf":1.0}}},"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":2,"docs":{"25":{"tf":1.0},"26":{"tf":1.0}},"i":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"n":{"df":0,"docs":{},"g":{"df":0,"docs":{},"l":{"df":1,"docs":{"17":{"tf":1.0}}}}},"t":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"t":{"df":1,"docs":{"25":{"tf":1.0}}}},"df":0,"docs":{}}},"z":{"df":0,"docs":{},"e":{"df":1,"docs":{"15":{"tf":1.0}}}}},"l":{"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"e":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"df":2,"docs":{"15":{"tf":1.7320508075688772},"17":{"tf":1.4142135623730951}}}},"df":0,"docs":{}},"o":{"df":0,"docs":{},"w":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"(":{"df":0,"docs":{},"x":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":1,"docs":{"17":{"tf":1.4142135623730951}}}}}},"d":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":0,"docs":{},"n":{"df":1,"docs":{"26":{"tf":1.0}}}}}},"df":3,"docs":{"17":{"tf":1.0},"21":{"tf":1.0},"26":{"tf":1.0
}},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}}}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"8":{"tf":1.0}}}},"u":{"df":0,"docs":{},"r":{"c":{"df":4,"docs":{"18":{"tf":1.0},"22":{"tf":1.0},"28":{"tf":1.0},"8":{"tf":1.0}}},"df":0,"docs":{}}}},"p":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"i":{"df":0,"docs":{},"f":{"df":3,"docs":{"11":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":1.0}},"i":{"df":1,"docs":{"10":{"tf":1.0}}}}}},"df":0,"docs":{},"e":{"d":{"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{}},"n":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}}},"o":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}},"q":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"p":{"d":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}}},"s":{"df":0,"docs":{},"e":{"2":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"e":{"3":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}}}},"t":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":1,"docs":{"26":{"tf":1.0}}}},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}},"t":{"df":0,"docs":{},"i":{"c":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"p":{"df":2,"docs":{"21":{"tf":1.0},"22":{"tf":1.0}}}},"i":{"df":0,"docs":{},"l":{"df":0,"docs":{},"l":{"df":3,"docs":{"11":{"tf":1.0},"15":{"tf":1.0},"25":{"tf":1.0}}}}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"e":{"df":2,"docs":{"15":{"tf":1.0},"26":{"tf":2.0}}}}},"r":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"m":{"df":1,"docs":{"10":{"tf":1.0}}}},"df":0,"docs":{}},"u":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"22":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"u":{"b":{"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"a":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"j":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":2,"docs":{"22":{"tf":1.0},"26":{"tf":1.0}}}},"df":0,"docs":{}}},"t":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":1,"docs":{"26":{"tf":1.0}}}}}}}},"c":{"df":0,"docs":{},"h":{"df":4,"docs":{"11":{"tf":1.0},"21":{"tf":1.0},"25":{"tf":1.4142135623730951},"26":{"tf":1.0}}}},"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"m":{".":{"df":0,"docs":{},"s":{"df":0,"docs":{},"u":{"df":0,"docs":{},"m":{"df":1,"docs":{"17":{"tf":1.0}}}}}},"df":1,"docs":{"17":{"tf":2.6457513110645907}},"m":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"26":{"tf":1.4142135623730951}}}}},"df":0,"docs":{}}},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"s":{"c":{"a":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"r":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{}}}},"p":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":7,"docs":{"10":{"tf":2.0},"11":{"tf":1.0},"20":{"tf":1.0},"21":{"tf":1.4142135623730951},"25":{"tf":1.0},"28":{"tf":1.0},"8":{"tf":1.7320508075688772}}}}}}}},"y":{"df":0,"docs":{},"m":{"b":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.0}}}}},"df":0,"docs":{}},"n":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"x":{"df":3,"docs":{"10":{"tf":1.0},"11":{"tf":1.0},"22":{"tf":1.4142135623730951}}}},"df":0,"docs":{}}},
"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"m":{"df":1,"docs":{"20":{"tf":1.0}}}}}}}},"t":{"a":{"df":0,"docs":{},"k":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}}}},"r":{"df":0,"docs":{},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"(":{"df":1,"docs":{"21":{"tf":1.0}}},"/":{"df":0,"docs":{},"r":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"/":{"df":0,"docs":{},"m":{"df":0,"docs":{},"i":{"df":1,"docs":{"21":{"tf":1.0}}}}},"df":0,"docs":{}}}},"df":0,"docs":{}}}}}},"=":{"$":{"df":0,"docs":{},"{":{"df":0,"docs":{},"t":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":2,"docs":{"10":{"tf":1.0},"11":{"tf":1.0}}}}}}}}},"df":0,"docs":{},"i":{"6":{"8":{"6":{"df":1,"docs":{"11":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{}}},"_":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"12":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{}}}},"df":9,"docs":{"10":{"tf":4.0},"11":{"tf":3.1622776601683795},"12":{"tf":1.0},"13":{"tf":1.0},"14":{"tf":1.0},"25":{"tf":1.4142135623730951},"27":{"tf":1.0},"8":{"tf":2.0},"9":{"tf":1.0}}}}}}},"df":1,"docs":{"15":{"tf":1.0}},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"m":{"df":1,"docs":{"25":{"tf":1.0}},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":0,"docs":{},"o":{"df":0,"docs":{},"g":{"df":1,"docs":{"16":{"tf":1.0}}}}}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"17":{"tf":1.0}}}}},"h":{"a":{"df":0,"docs":{},"t":{"'":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"e":{"df":0,"docs":{},"f":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":2,"docs":{"11":{"tf":1.0},"27":{"tf":1.0}}}}}}}},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"g":{"df":1,"docs":{"26":{"tf":1.4142135623730951}}}}},"o":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"df":2,"docs":{"25":{"tf":1.0},"3":{"tf":1.0}}}},"u":{"df":0,"docs":{},"g":{"df":0,"docs":{},"h":{"df":1,"docs":{"10":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"g":{"df":0,"docs":{},"h":{"df":0,"docs":{},"t":{"df":0,"docs":{},"p":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"27":{"tf":1.0}}}}}}}}}}}},"i":{"df":0,"docs":{},"m":{"df":0,"docs":{},"e":{"df":2,"docs":{"21":{"tf":1.0},"22":{"tf":1.7320508075688772}}}},"p":{"df":1,"docs":{"3":{"tf":1.0}}}},"o":{"/":{"df":0,"docs":{},"f":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"m":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"df":0,"docs":{},"g":{"df":0,"docs":{},"g":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.0}}}}},"o":{"df":0,"docs":{},"l":{"df":5,"docs":{"23":{"tf":1.0},"24":{"tf":1.4142135623730951},"26":{"tf":1.0},"27":{"tf":1.7320508075688772},"28":{"tf":1.4142135623730951}}}}},"r":{"a":{"df":0,"docs":{},"n":{"df":0,"docs":{},"s":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"t":{"df":1,"docs":{"11":{"tf":1.0}}}},"df":0,"docs":{}}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"e":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}}},"i":{"c":{"df":0,"docs":{},"k":{"df":1,"docs":{"26":{"tf":1.0}}}},"df":0,"docs":{},"g":{"df":0,"docs":{},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"8":{"tf":1.0}}}}}},"p":{"df":0,"docs":{},"l":{"df":2,"docs":{"10":{"tf":1.4142135623730951},"11":{"tf":1.0}}}},"v":{"df":0,"docs":{},"i":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"3":{
"tf":1.0}}}},"df":0,"docs":{}}}}},"w":{"df":0,"docs":{},"o":{"df":4,"docs":{"16":{"tf":2.449489742783178},"17":{"tf":1.0},"26":{"tf":1.0},"9":{"tf":1.0}}}}},"u":{"6":{"4":{"df":0,"docs":{},"x":{"2":{"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}},"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"g":{"df":0,"docs":{},"n":{"df":1,"docs":{"15":{"tf":1.0}}}}}}},"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"8":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"a":{"df":0,"docs":{},"n":{"d":{"df":2,"docs":{"26":{"tf":1.0},"27":{"tf":1.0}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}}},"df":0,"docs":{},"f":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"df":1,"docs":{"3":{"tf":1.0}}}}}}}},"i":{"df":0,"docs":{},"t":{"df":2,"docs":{"20":{"tf":1.0},"8":{"tf":1.0}}}},"l":{"df":0,"docs":{},"e":{"df":0,"docs":{},"s":{"df":0,"docs":{},"s":{"df":2,"docs":{"10":{"tf":1.0},"8":{"tf":1.0}}}}}},"p":{"c":{"df":0,"docs":{},"k":{"df":0,"docs":{},"l":{"df":0,"docs":{},"p":{"d":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"s":{"a":{"df":0,"docs":{},"f":{"df":2,"docs":{"15":{"tf":1.0},"23":{"tf":1.0}}}},"df":0,"docs":{}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"p":{"df":3,"docs":{"21":{"tf":1.4142135623730951},"25":{"tf":1.0},"8":{"tf":1.0}}},"s":{"df":12,"docs":{"10":{"tf":2.23606797749979},"11":{"tf":2.23606797749979},"15":{"tf":1.4142135623730951},"17":{"tf":1.7320508075688772},"20":{"tf":1.7320508075688772},"21":{"tf":2.23606797749979},"22":{"tf":1.4142135623730951},"23":{"tf":1.4142135623730951},"25":{"tf":1.0},"3":{"tf":1.0},"8":{"tf":1.0},"9":{"tf":2.23606797749979}}},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":2,"docs":{"23":{"tf":1.0},"26":{"tf":1.0}}}}}},"v":{"7":{"df":1,"docs":{"10":{"tf":1.0}}},"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"g":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"23":{"tf":1.7320508075688772}}},"df":0,"docs":{}}}}},"u":{"df":2,"docs":{"17":{"tf":1.4142135623730951},"26":{"tf":1.0}}}},"r":{"df":0,"docs":{},"i":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"9":{"tf":1.0}}}},"df":0,"docs":{}},"df":1,"docs":{"25":{"tf":1.0}},"o":{"df":0,"docs":{},"u":{"df":4,"docs":{"20":{"tf":1.0},"21":{"tf":1.0},"28":{"tf":1.0},"8":{"tf":1.0}}}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":8,"docs":{"10":{"tf":1.0},"15":{"tf":1.4142135623730951},"16":{"tf":2.6457513110645907},"17":{"tf":2.6457513110645907},"25":{"tf":1.0},"5":{"tf":1.4142135623730951},"8":{"tf":1.0},"9":{"tf":1.4142135623730951}}}}}},"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"17":{"tf":1.0}}},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"15":{"tf":1.0}}}}}},"t":{"df":0,"docs":{},"i":{"c":{"df":2,"docs":{"16":{"tf":2.0},"17":{"tf":2.0}}},"df":0,"docs":{}}}}},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"w":{"df":1,"docs":{"22":{"tf":1.7320508075688772}}}},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"17":{"tf":1.0}}}},"df":0,"docs":{}}}},"s":{"df":0,"docs":{},"u":{"a":{"df":0,"docs":{},"l":{"df":1,"docs":{"22":{"tf":1.0}}}},"df":0,"docs":{}}}}},"w":{"a":{"df":0,"docs":{},"y":{"df":2,"docs":{"16":{"tf":1.0},"9":{"tf":1.0}}}},"df":0,"docs":{},"e":{"df":0,"docs"
:{},"l":{"df":0,"docs":{},"l":{"df":2,"docs":{"15":{"tf":1.0},"20":{"tf":1.0}}}}},"i":{"d":{"df":0,"docs":{},"e":{"df":1,"docs":{"17":{"tf":1.0}}},"t":{"df":0,"docs":{},"h":{"df":1,"docs":{"16":{"tf":1.7320508075688772}}}}},"df":0,"docs":{},"n":{"d":{"df":0,"docs":{},"o":{"df":0,"docs":{},"w":{"df":1,"docs":{"11":{"tf":1.0}}}}},"df":0,"docs":{}}},"o":{"df":0,"docs":{},"k":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"21":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"k":{"df":3,"docs":{"11":{"tf":1.0},"17":{"tf":1.0},"4":{"tf":1.0}},"l":{"df":0,"docs":{},"o":{"a":{"d":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}}}}}},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":3,"docs":{"15":{"tf":1.0},"21":{"tf":1.0},"3":{"tf":1.0}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"df":1,"docs":{"25":{"tf":1.0}}}}}}},"o":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"df":1,"docs":{"21":{"tf":1.0}}}}}}},"x":{"8":{"6":{"_":{"6":{"4":{"df":1,"docs":{"8":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":1,"docs":{"25":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"0":{",":{"df":0,"docs":{},"x":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"1":{"df":1,"docs":{"22":{"tf":1.4142135623730951}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"1":{",":{"df":0,"docs":{},"x":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"2":{"df":1,"docs":{"22":{"tf":1.0}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"2":{",":{"df":0,"docs":{},"x":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"0":{"df":1,"docs":{"22":{"tf":1.7320508075688772}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}},"y":{"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":0,"docs":{},"s":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"df":0,"docs":{},"f":{"df":1,"docs":{"17":{"tf":1.0}}}}}}}}}}}},"title":{"root":{"a":{"d":{"d":{"df":1,"docs":{"7":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{},"n":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"y":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":1,"docs":{"24":{"tf":1.0}}}},"z":{"df":2,"docs":{"27":{"tf":1.0},"28":{"tf":1.0}}}}}},"df":0,"docs":{}},"p":{"df":0,"docs":{},"p":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"x":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":1,"docs":{"6":{"tf":1.0}}}}}}}}},"r":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"28":{"tf":1.0}}}}}},"df":0,"docs":{}}}}}},"df":0,"docs":{}},"t":{"df":0,"docs":{},"t":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"b":{"df":0,"docs":{},"u":{"df":0,"docs":{},"t":{"df":1,"docs":{"12":{"tf":1.0}}}}},"df":0,"docs":{}}}}}},"b":{"df":0,"docs":{},"o":{"df":0,"docs":{},"u":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"15":{"tf":1.0}}},"df":0,"docs":{}}}}},"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"k":{"df":1,"docs":{"15":{"tf":1.0}}}},"df":0,"docs":{}}},"o":{"d":{"df":0,"docs":{},"e":{"df":3,"docs":{"24":{"tf":1.0},"27":{"tf":1.0},"28":{"tf":1.0}}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":1,"docs":{"17":{"tf":1.0}}}}},"df":0,"docs":{}}}}},"p":{"df":0,"docs":{},"u":{"df":3,"docs":{"11":{"tf":1.0},"25":{"tf":1.0},"26":{"tf":1.0}}}}},"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"14":{"tf":1.0}}}},"df":0,"docs":{}}}},"i":{"df":0,"
docs":{},"s":{"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"v":{"df":1,"docs":{"3":{"tf":1.0}}}}},"df":0,"docs":{}}}},"df":0,"docs":{},"e":{"df":0,"docs":{},"n":{"a":{"b":{"df":0,"docs":{},"l":{"df":1,"docs":{"8":{"tf":1.0}}}},"df":0,"docs":{}},"df":0,"docs":{}}},"f":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":3,"docs":{"10":{"tf":1.0},"14":{"tf":1.0},"8":{"tf":1.0}}}}}},"df":0,"docs":{}},"l":{"df":0,"docs":{},"o":{"a":{"df":0,"docs":{},"t":{"df":1,"docs":{"4":{"tf":1.0}}}},"df":0,"docs":{}}},"u":{"df":0,"docs":{},"n":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":1,"docs":{"6":{"tf":1.0}}}}}}},"df":0,"docs":{}},"s":{"df":0,"docs":{},"e":{"df":1,"docs":{"7":{"tf":1.0}}}}}},"h":{"df":0,"docs":{},"i":{"df":0,"docs":{},"s":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"2":{"tf":1.0}}}}}}}},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"z":{"df":0,"docs":{},"o":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":2,"docs":{"16":{"tf":1.0},"17":{"tf":1.0}}}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"14":{"tf":1.0}}}}}},"i":{"a":{"c":{"a":{"df":1,"docs":{"28":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{}},"df":0,"docs":{},"n":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":1,"docs":{"13":{"tf":1.0}}}}},"t":{"df":0,"docs":{},"e":{"df":0,"docs":{},"l":{"'":{"df":1,"docs":{"28":{"tf":1.0}}},"df":0,"docs":{}},"r":{"df":0,"docs":{},"n":{"df":1,"docs":{"26":{"tf":1.0}}}}},"r":{"df":0,"docs":{},"o":{"d":{"df":0,"docs":{},"u":{"c":{"df":0,"docs":{},"t":{"df":1,"docs":{"0":{"tf":1.0}}}},"df":0,"docs":{}}},"df":0,"docs":{}}}}}},"l":{"df":0,"docs":{},"i":{"b":{"df":0,"docs":{},"r":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"5":{"tf":1.0}}}}},"df":0,"docs":{}}},"df":0,"docs":{},"n":{"df":0,"docs":{},"u":{"df":0,"docs":{},"x":{"df":1,"docs":{"19":{"tf":1.0}}}}}},"l":{"df":0,"docs":{},"v":{"df":0,"docs":{},"m":{"df":1,"docs":{"29":{"tf":1.0}}}}}},"m":{"a":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":2,"docs":{"24":{"tf":1.0},"27":{"tf":1.0}}}}}},"df":0,"docs":{},"t":{"df":0,"docs":{},"h":{"df":2,"docs":{"4":{"tf":1.0},"5":{"tf":1.0}}}}},"c":{"a":{"df":1,"docs":{"29":{"tf":1.0}}},"df":0,"docs":{}},"df":0,"docs":{},"i":{"c":{"df":0,"docs":{},"r":{"df":0,"docs":{},"o":{"a":{"df":0,"docs":{},"r":{"c":{"df":0,"docs":{},"h":{"df":0,"docs":{},"i":{"df":0,"docs":{},"t":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"df":0,"docs":{}}}}}},"df":0,"docs":{}}},"df":0,"docs":{}}}},"df":0,"docs":{}},"o":{"d":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"n":{"df":1,"docs":{"25":{"tf":1.0}}}}}},"df":0,"docs":{}},"u":{"df":0,"docs":{},"l":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"p":{"df":0,"docs":{},"l":{"df":0,"docs":{},"i":{"df":1,"docs":{"7":{"tf":1.0}}}}}}}}}},"o":{"df":0,"docs":{},"p":{"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":2,"docs":{"16":{"tf":1.0},"17":{"tf":1.0}}}}}},"p":{"a":{"c":{"df":0,"docs":{},"k":{"df":0,"docs":{},"e":{"d":{"_":{"df":0,"docs":{},"s":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"d":{"df":1,"docs":{"3":{"tf":1.0}}},"df":0,"docs":{}}}}},"df":0,"docs":{}},"df":0,"docs":{}}}},"df":0,"docs":{}},"df":0,"docs":{},"e":{"df":0,"docs":{},"r":{"df":0,"docs":{},"f":{"df":1,"docs":{"20":{"tf":1.0}},"o":{"df":0,"docs":{},
"r":{"df":0,"docs":{},"m":{"df":3,"docs":{"17":{"tf":1.0},"18":{"tf":1.0},"19":{"tf":1.0}}}}}}}},"o":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":1,"docs":{"4":{"tf":1.0}}}}}},"r":{"df":0,"docs":{},"o":{"df":0,"docs":{},"f":{"df":0,"docs":{},"i":{"df":0,"docs":{},"l":{"df":2,"docs":{"18":{"tf":1.0},"19":{"tf":1.0}}}}}}}},"r":{"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"d":{"df":1,"docs":{"21":{"tf":1.0}}},"df":0,"docs":{}}}},"df":0,"docs":{},"p":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"22":{"tf":1.0}}}}}}},"u":{"df":0,"docs":{},"n":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"df":0,"docs":{},"m":{"df":1,"docs":{"14":{"tf":1.0}}}}}},"s":{"df":0,"docs":{},"t":{"df":1,"docs":{"2":{"tf":1.0}},"f":{"df":0,"docs":{},"l":{"a":{"df":0,"docs":{},"g":{"df":1,"docs":{"9":{"tf":1.0}}}},"df":0,"docs":{}}}}}}},"s":{"df":0,"docs":{},"h":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":1,"docs":{"5":{"tf":1.0}}}}}},"i":{"df":0,"docs":{},"m":{"d":{"df":2,"docs":{"1":{"tf":1.0},"2":{"tf":1.0}}},"df":0,"docs":{}}},"u":{"df":0,"docs":{},"m":{"df":0,"docs":{},"m":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":1,"docs":{"26":{"tf":1.0}}}}},"df":0,"docs":{}}}}},"t":{"a":{"df":0,"docs":{},"r":{"df":0,"docs":{},"g":{"df":0,"docs":{},"e":{"df":0,"docs":{},"t":{"_":{"df":0,"docs":{},"f":{"df":0,"docs":{},"e":{"a":{"df":0,"docs":{},"t":{"df":0,"docs":{},"u":{"df":0,"docs":{},"r":{"df":1,"docs":{"12":{"tf":1.0}}}}}},"df":0,"docs":{}}}},"df":3,"docs":{"10":{"tf":1.0},"11":{"tf":1.0},"8":{"tf":1.0}}}}}}},"df":0,"docs":{},"o":{"df":0,"docs":{},"o":{"df":0,"docs":{},"l":{"df":1,"docs":{"24":{"tf":1.0}}}}}},"u":{"df":0,"docs":{},"s":{"df":3,"docs":{"20":{"tf":1.0},"23":{"tf":1.0},"9":{"tf":1.0}}}},"v":{"a":{"df":0,"docs":{},"l":{"df":0,"docs":{},"g":{"df":0,"docs":{},"r":{"df":0,"docs":{},"i":{"df":0,"docs":{},"n":{"d":{"df":1,"docs":{"23":{"tf":1.0}}},"df":0,"docs":{}}}}}}},"df":0,"docs":{},"e":{"c":{"df":0,"docs":{},"t":{"df":0,"docs":{},"o":{"df":0,"docs":{},"r":{"df":1,"docs":{"5":{"tf":1.0}}}}}},"df":0,"docs":{},"r":{"df":0,"docs":{},"t":{"df":0,"docs":{},"i":{"c":{"df":1,"docs":{"16":{"tf":1.0}}},"df":0,"docs":{}}}}},"i":{"df":0,"docs":{},"e":{"df":0,"docs":{},"w":{"df":1,"docs":{"22":{"tf":1.0}}}}}}}}},"lang":"English","pipeline":["trimmer","stopWordFilter","stemmer"],"ref":"id","version":"0.9.5"},"results_options":{"limit_results":30,"teaser_word_count":30},"search_options":{"bool":"OR","expand":true,"fields":{"body":{"boost":1},"breadcrumbs":{"boost":1},"title":{"boost":2}}}} \ No newline at end of file diff --git a/perf-guide/src/ascii.css b/perf-guide/src/ascii.css new file mode 100644 index 000000000..4c0265119 --- /dev/null +++ b/perf-guide/src/ascii.css @@ -0,0 +1,4 @@ +code { + /* "Source Code Pro" breaks ASCII art */ + font-family: Consolas, "Ubuntu Mono", Menlo, "DejaVu Sans Mono", monospace; +} diff --git a/perf-guide/target-feature/attribute.html b/perf-guide/target-feature/attribute.html new file mode 100644 index 000000000..15c1be9c4 --- /dev/null +++ b/perf-guide/target-feature/attribute.html @@ -0,0 +1,232 @@ + + + + + + Using the target_feature attribute - Rust SIMD Performance Guide + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + + + + +
+
+

The target_feature attribute

+ + +
+ + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/perf-guide/target-feature/features.html b/perf-guide/target-feature/features.html new file mode 100644 index 000000000..058bc6bdb --- /dev/null +++ b/perf-guide/target-feature/features.html @@ -0,0 +1,237 @@ + + + + + + Target features - Rust SIMD Performance Guide + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + + + + +
+
+

Enabling target features

+

Not all processors of a given architecture have SIMD processing units, +and using a SIMD instruction which is not supported will trigger undefined behavior.

+

To allow building safe, portable programs, the Rust compiler will not, by default, +generate any sort of vector instructions, unless it can statically determine +they are supported. For example, on AMD64, SSE2 support is architecturally guaranteed. +The x86_64-apple-darwin target enables up to SSSE3. To get a definitive list of +which features are enabled by default on various platforms, refer to the target +specifications in the compiler's source code.
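Whether a given feature ended up statically enabled can be checked from the code itself with the cfg! macro. A minimal sketch, assuming nothing beyond the standard library (the sse2 feature name is just an example):
+fn main() {
+    // cfg! expands to a compile-time boolean reflecting the target features
+    // that were enabled when this crate was compiled.
+    if cfg!(target_feature = "sse2") {
+        println!("compiled with SSE2 vector instructions enabled");
+    } else {
+        println!("compiled without SSE2");
+    }
+}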

+ +
+ + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/perf-guide/target-feature/inlining.html b/perf-guide/target-feature/inlining.html new file mode 100644 index 000000000..cb1de3b72 --- /dev/null +++ b/perf-guide/target-feature/inlining.html @@ -0,0 +1,232 @@ + + + + + + Interaction with inlining - Rust SIMD Performance Guide + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + + + + +
+
+

Inlining

+ + +
+ + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/perf-guide/target-feature/runtime.html b/perf-guide/target-feature/runtime.html new file mode 100644 index 000000000..a44258903 --- /dev/null +++ b/perf-guide/target-feature/runtime.html @@ -0,0 +1,232 @@ + + + + + + Detecting features at runtime - Rust SIMD Performance Guide + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + + + + +
+
+

Detecting host features at runtime

+ + +
+ + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/perf-guide/target-feature/rustflags.html b/perf-guide/target-feature/rustflags.html new file mode 100644 index 000000000..26906316e --- /dev/null +++ b/perf-guide/target-feature/rustflags.html @@ -0,0 +1,303 @@ + + + + + + Using RUSTFLAGS - Rust SIMD Performance Guide + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + + + + +
+
+

Using RUSTFLAGS

+

One of the easiest ways to benefit from SIMD is to allow the compiler +to generate code using certain vector instruction extensions.

+

The environment variable RUSTFLAGS can be used to pass options for code +generation to the Rust compiler. These flags will affect all compiled crates.

+

There are two flags which can be used to enable specific vector extensions:

+

target-feature

+
    +
  • +

    Syntax: -C target-feature=<features>

    +
  • +
  • +

    Provides the compiler with a comma-separated set of instruction extensions +to enable.

    +

    Example: Use -C target-feature=+sse3,+avx to enable generating instructions +for Streaming SIMD Extensions 3 and +Advanced Vector Extensions.

    +
  • +
  • +

    To list target triples for all targets supported by Rust, use:

    +
    rustc --print target-list
    +
    +
  • +
  • +

    To list all supported target features for a certain target triple, use:

    +
    rustc --target=${TRIPLE} --print target-features
    +
    +
  • +
  • +

    Note that all CPU features are independent, and will have to be enabled individually.

    +

    Example: Setting -C target-feature=+avx2 will not enable fma, even though +all CPUs which support AVX2 also support FMA. To enable both, one has to use +-C target-feature=+avx2,+fma. A full invocation is sketched after this list.

    +
  • +
  • +

    Some features also depend on other features, which need to be enabled for the +target instructions to be generated.

    +

    Example: Unless v7 is specified as the target CPU (see below), to enable +NEON on ARM it is necessary to use -C target-feature=+v7,+neon.

    +
  • +
+
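For example, a hypothetical invocation enabling AVX2 and FMA for every crate in a Cargo build could look like:

    RUSTFLAGS='-C target-feature=+avx2,+fma' cargo build --release

(RUSTFLAGS is read by cargo and passed on to rustc; the flag itself is the same one described above.)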

target-cpu

+
    +
  • +

    Syntax: -C target-cpu=<cpu>

    +
  • +
  • +

    Sets the identifier of a CPU family / model for which to build and optimize the code.

    +

    Example: RUSTFLAGS='-C target-cpu=cortex-a75'

    +
  • +
  • +

    To list all supported target CPUs for a certain target triple, use:

    +
    rustc --target=${TRIPLE} --print target-cpus
    +
    +

    Example:

    +
    rustc --target=i686-pc-windows-msvc --print target-cpus
    +
    +
  • +
  • +

    The compiler will translate this into a list of target features. Therefore, +individual feature checks (#[cfg(target_feature = "...")]) will still +work properly (see the sketch after this list).

    +
  • +
  • +

    It will cause the code generator to optimize the generated code for that +specific CPU model.

    +
  • +
  • +

    Using native as the CPU model will cause Rust to generate and optimize code +for the CPU running the compiler. It is useful when building programs which you +plan to use only locally. This should never be used when the generated programs +are meant to be run on other computers, such as when packaging for distribution +or cross-compiling.

    +
  • +
+ +
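Because -C target-cpu is translated into target features, conditional compilation keeps working with it. A minimal sketch (the function name and the skylake CPU choice are illustrative; skylake implies AVX2):
+// Compiled in when AVX2 is statically enabled, e.g. via
+// RUSTFLAGS='-C target-cpu=skylake'.
+#[cfg(target_feature = "avx2")]
+fn describe_build() -> &'static str {
+    "built with AVX2 statically enabled"
+}
+
+// Fallback used when AVX2 was not enabled at compile time.
+#[cfg(not(target_feature = "avx2"))]
+fn describe_build() -> &'static str {
+    "built without AVX2"
+}
+
+fn main() {
+    println!("{}", describe_build());
+}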
+ + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/perf-guide/tomorrow-night.css b/perf-guide/tomorrow-night.css new file mode 100644 index 000000000..f71979258 --- /dev/null +++ b/perf-guide/tomorrow-night.css @@ -0,0 +1,104 @@ +/* Tomorrow Night Theme */ +/* http://jmblog.github.com/color-themes-for-google-code-highlightjs */ +/* Original theme - https://github.com/chriskempson/tomorrow-theme */ +/* http://jmblog.github.com/color-themes-for-google-code-highlightjs */ + +/* Tomorrow Comment */ +.hljs-comment { + color: #969896; +} + +/* Tomorrow Red */ +.hljs-variable, +.hljs-attribute, +.hljs-tag, +.hljs-regexp, +.ruby .hljs-constant, +.xml .hljs-tag .hljs-title, +.xml .hljs-pi, +.xml .hljs-doctype, +.html .hljs-doctype, +.css .hljs-id, +.css .hljs-class, +.css .hljs-pseudo { + color: #cc6666; +} + +/* Tomorrow Orange */ +.hljs-number, +.hljs-preprocessor, +.hljs-pragma, +.hljs-built_in, +.hljs-literal, +.hljs-params, +.hljs-constant { + color: #de935f; +} + +/* Tomorrow Yellow */ +.ruby .hljs-class .hljs-title, +.css .hljs-rule .hljs-attribute { + color: #f0c674; +} + +/* Tomorrow Green */ +.hljs-string, +.hljs-value, +.hljs-inheritance, +.hljs-header, +.hljs-name, +.ruby .hljs-symbol, +.xml .hljs-cdata { + color: #b5bd68; +} + +/* Tomorrow Aqua */ +.hljs-title, +.css .hljs-hexcolor { + color: #8abeb7; +} + +/* Tomorrow Blue */ +.hljs-function, +.python .hljs-decorator, +.python .hljs-title, +.ruby .hljs-function .hljs-title, +.ruby .hljs-title .hljs-keyword, +.perl .hljs-sub, +.javascript .hljs-title, +.coffeescript .hljs-title { + color: #81a2be; +} + +/* Tomorrow Purple */ +.hljs-keyword, +.javascript .hljs-function { + color: #b294bb; +} + +.hljs { + display: block; + overflow-x: auto; + background: #1d1f21; + color: #c5c8c6; + padding: 0.5em; + -webkit-text-size-adjust: none; +} + +.coffeescript .javascript, +.javascript .xml, +.tex .hljs-formula, +.xml .javascript, +.xml .vbscript, +.xml .css, +.xml .hljs-cdata { + opacity: 0.5; +} + +.hljs-addition { + color: #718c00; +} + +.hljs-deletion { + color: #c82829; +} diff --git a/perf-guide/vert-hor-ops.html b/perf-guide/vert-hor-ops.html new file mode 100644 index 000000000..4028f6f5c --- /dev/null +++ b/perf-guide/vert-hor-ops.html @@ -0,0 +1,299 @@ + + + + + + Vertical and horizontal operations - Rust SIMD Performance Guide + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + + + + + + +
+
+

Vertical and horizontal operations

+

In SIMD terminology, each vector has a certain "width" (number of lanes). +A vector processor is able to perform two kinds of operations on a vector:

+
    +
  • Vertical operations: +operate lane-wise on two vectors of the same width; the result has the same width
  • +
+

Example: vertical addition of two f32x4 vectors

+
  %0     == | 2 | -3.5 |  0 | 7 |
+              +     +     +   +
+  %1     == | 4 |  1.5 | -1 | 0 |
+              =     =     =   =
+%0 + %1  == | 6 |  -2  | -1 | 7 |
+
+
    +
  • Horizontal operations: +reduce the elements of two vectors in some way; +the result's elements combine information from the two original vectors
  • +
+

Example: horizontal addition of two i64x2 vectors (signed lanes, since one element below is negative)

+
  %0     == | 1 |  3 |
+              └─+───┘
+                └───────┐
+                        │
+  %1     == | 4 | -1 |  │
+              └─+──┘    │
+                └───┐   │
+                    │   │
+              ┌─────│───┘
+              ▼     ▼
+%0 + %1  == | 4 |   3 |
+
+
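Both kinds of operations can be written directly against the vector types used in this guide. A minimal sketch, assuming the packed_simd crate, with the f32x4 lane values taken from the first diagram:
+use packed_simd::f32x4;
+
+fn main() {
+    // Vertical addition: lane-wise; typically a single hardware instruction.
+    let a = f32x4::new(2., -3.5, 0., 7.);
+    let b = f32x4::new(4., 1.5, -1., 0.);
+    let c = a + b;
+    println!("{:?}", c); // f32x4(6.0, -2.0, -1.0, 7.0)
+
+    // Horizontal addition: reduces all lanes of one vector to a scalar.
+    assert_eq!(c.sum(), 10.);
+}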

Performance considerations of horizontal operations

+

The result of vertical operations, like vector negation: -a, for a given lane, +does not depend on the result of the operation for the other lanes. The result +of horizontal operations, like the vector sum reduction: a.sum(), depends on +the value of all vector lanes.

+

On virtually all architectures, vertical operations are fast, while horizontal +operations are, by comparison, very slow.

+

Consider the following two functions for computing the sum of all f32 values +in a slice:

+

+#![allow(unused)]
+// The f32x4 type comes from the packed_simd crate discussed in this guide.
+use packed_simd::f32x4;
+fn main() {
+fn fast_sum(x: &[f32]) -> f32 {
+    assert!(x.len() % 4 == 0);
+    let mut sum = f32x4::splat(0.); // [0., 0., 0., 0.]
+    for i in (0..x.len()).step_by(4) {
+        sum += f32x4::from_slice_unaligned(&x[i..]);
+    }
+    sum.sum() // a single horizontal reduction, once at the end
+}
+
+fn slow_sum(x: &[f32]) -> f32 {
+    assert!(x.len() % 4 == 0);
+    let mut sum: f32 = 0.;
+    for i in (0..x.len()).step_by(4) {
+        sum += f32x4::from_slice_unaligned(&x[i..]).sum(); // horizontal reduction in every iteration
+    }
+    sum
+}
+}
+
+

The inner loop over the slice is where the bulk of the work actually happens. +There, the fast_sum function performs vertical operations into a vector, doing +a single horizontal reduction at the end, while the slow_sum function performs +horizontal vector operations inside the loop.

+

On all widely-used architectures, fast_sum is a large constant factor faster +than slow_sum. You can run the slice_sum example and see for yourself. On +the particular machine tested, the algorithm using the horizontal vector +addition is 2.7x slower than the one using vertical vector operations!
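As a quick sanity check (a sketch with hypothetical input), both functions agree on the result; only their speed differs:
+fn main() {
+    // 1024 ones: the length is a multiple of 4, as both functions assert.
+    let x = vec![1.0_f32; 1024];
+    assert_eq!(fast_sum(&x), slow_sum(&x)); // both are exactly 1024.0 here
+}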

+ +
+ + +
+
+ + + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/rust-logo.png b/rust-logo.png new file mode 100644 index 000000000..74b4bd695 Binary files /dev/null and b/rust-logo.png differ diff --git a/rustdoc.css b/rustdoc.css new file mode 100644 index 000000000..9d9a8d300 --- /dev/null +++ b/rustdoc.css @@ -0,0 +1 @@ + @font-face {font-family:'Fira Sans';font-style:normal;font-weight:400;src:local('Fira Sans'),url("FiraSans-Regular.woff") format('woff');}@font-face {font-family:'Fira Sans';font-style:normal;font-weight:500;src:local('Fira Sans Medium'),url("FiraSans-Medium.woff") format('woff');}@font-face {font-family:'Source Serif Pro';font-style:normal;font-weight:400;src:local('Source Serif Pro'),url("SourceSerifPro-Regular.ttf.woff") format('woff');}@font-face {font-family:'Source Serif Pro';font-style:italic;font-weight:400;src:local('Source Serif Pro Italic'),url("SourceSerifPro-It.ttf.woff") format('woff');}@font-face {font-family:'Source Serif Pro';font-style:normal;font-weight:700;src:local('Source Serif Pro Bold'),url("SourceSerifPro-Bold.ttf.woff") format('woff');}@font-face {font-family:'Source Code Pro';font-style:normal;font-weight:400;src:url("SourceCodePro-Regular.woff") format('woff');}@font-face {font-family:'Source Code Pro';font-style:normal;font-weight:600;src:url("SourceCodePro-Semibold.woff") format('woff');}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;}html{content:"";}@media (prefers-color-scheme:light){html{content:"light";}}@media (prefers-color-scheme:dark){html{content:"dark";}}body{font:16px/1.4 "Source Serif Pro",serif;margin:0;position:relative;padding:10px 15px 20px 15px;-webkit-font-feature-settings:"kern","liga";-moz-font-feature-settings:"kern","liga";font-feature-settings:"kern","liga";}h1{font-size:1.5em;}h2{font-size:1.4em;}h3{font-size:1.3em;}h1,h2,h3:not(.impl):not(.method):not(.type):not(.tymethod):not(.important),h4:not(.method):not(.type):not(.tymethod):not(.associatedconstant){font-weight:500;margin:20px 0 15px 0;padding-bottom:6px;}h1.fqn{border-bottom:1px dashed;margin-top:0;}h2,h3:not(.impl):not(.method):not(.type):not(.tymethod),h4:not(.method):not(.type):not(.tymethod):not(.associatedconstant){border-bottom:1px solid;}h3.impl,h3.method,h4.method,h3.type,h4.type,h4.associatedconstant{flex-basis:100%;font-weight:600;margin-top:16px;margin-bottom:10px;position:relative;}h3.impl,h3.method,h3.type{padding-left:15px;}h1,h2,h3,h4,.sidebar,a.source,.search-input,.content table :not(code)>a,.collapse-toggle,div.item-list .out-of-band,#source-sidebar,#sidebar-toggle{font-family:"Fira Sans",sans-serif;}ol,ul{padding-left:25px;}ul ul,ol ul,ul ol,ol ol{margin-bottom:.6em;}p{margin:0 0 .6em 0;}summary{outline:none;}code,pre,a.test-arrow{font-family:"Source Code Pro",monospace;}.docblock code,.docblock-short code{border-radius:3px;padding:0 0.1em;}.docblock pre code,.docblock-short pre code{padding:0;}pre{padding:14px;}.source .content pre{padding:20px;}img{max-width:100%;}li{position:relative;}.source .content{margin-top:50px;max-width:none;overflow:visible;margin-left:0px;min-width:70em;}nav.sub{font-size:16px;text-transform:uppercase;}.sidebar{width:200px;position:fixed;left:0;top:0;bottom:0;overflow:auto;}*{scrollbar-width:initial;}.sidebar{scrollbar-width:thin;}::-webkit-scrollbar{width:12px;}.sidebar::-webkit-scrollbar{width:8px;}::-webkit-scrollbar-track{-webkit-box-shadow:inset 0;}.sidebar .block>ul>li{margin-right:-10px;}.content,nav{max-width:960px;}.hidden{display:none 
!important;}.logo-container{height:100px;width:100px;position:relative;margin:20px auto;display:block;margin-top:10px;}.logo-container>img{max-width:100px;max-height:100px;position:absolute;left:50%;top:50%;transform:translate(-50%,-50%);display:block;}.sidebar .location{border:1px solid;font-size:17px;margin:30px 10px 20px 10px;text-align:center;word-wrap:break-word;}.sidebar .version{font-size:15px;text-align:center;border-bottom:1px solid;overflow-wrap:break-word;word-wrap:break-word;word-break:break-word;}.location:empty{border:none;}.location a:first-child{font-weight:500;}.block{padding:0;margin-bottom:14px;}.block h2,.block h3{margin-top:0;margin-bottom:8px;text-align:center;}.block ul,.block li{margin:0 10px;padding:0;list-style:none;}.block a{display:block;text-overflow:ellipsis;overflow:hidden;line-height:15px;padding:7px 5px;font-size:14px;font-weight:300;transition:border 500ms ease-out;}.sidebar-title{border-top:1px solid;border-bottom:1px solid;text-align:center;font-size:17px;margin-bottom:5px;}.sidebar-links{margin-bottom:15px;}.sidebar-links>a{padding-left:10px;width:100%;}.sidebar-menu{display:none;}.content{padding:15px 0;}.source .content pre.rust{white-space:pre;overflow:auto;padding-left:0;}.rustdoc:not(.source) .example-wrap{display:inline-flex;margin-bottom:10px;position:relative;}.example-wrap{width:100%;}.example-wrap>pre.line-number{overflow:initial;border:1px solid;border-top-left-radius:5px;border-bottom-left-radius:5px;padding:13px 8px;text-align:right;}.rustdoc:not(.source) .example-wrap>pre.rust{width:100%;overflow-x:auto;}.rustdoc:not(.source) .example-wrap>pre{margin:0;}#search{margin-left:230px;position:relative;}#results{position:absolute;right:0;left:0;overflow:auto;}#results>table{width:100%;table-layout:fixed;}.content pre.line-numbers{float:left;border:none;position:relative;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;}.line-numbers span{cursor:pointer;}.docblock-short p{display:inline;}.docblock-short.nowrap{display:block;overflow:hidden;white-space:nowrap;text-overflow:ellipsis;}.docblock-short p{overflow:hidden;text-overflow:ellipsis;margin:0;}.docblock code,.docblock-short code{white-space:pre-wrap;}.docblock h1,.docblock h2,.docblock h3,.docblock h4,.docblock h5{border-bottom:1px solid;}#main>.docblock h1{font-size:1.3em;}#main>.docblock h2{font-size:1.15em;}#main>.docblock h3,#main>.docblock h4,#main>.docblock h5{font-size:1em;}#main>h2+div,#main>h2+h3,#main>h3+div{display:none;flex-wrap:wrap;}.docblock h1{font-size:1em;}.docblock h2{font-size:0.95em;}.docblock h3,.docblock h4,.docblock h5{font-size:0.9em;}.docblock{margin-left:24px;position:relative;}.content .out-of-band{float:right;font-size:23px;margin:0px;padding:0px;font-weight:normal;}h3.impl>.out-of-band{font-size:21px;}h4.method>.out-of-band{font-size:19px;}h4>code,h3>code,.invisible>code{max-width:calc(100% - 41px);display:block;}.invisible{width:100%;display:inline-block;}.content .in-band{margin:0px;padding:0px;}.in-band>code{display:inline-block;}#main{position:relative;}#main>.since{top:inherit;font-family:"Fira Sans",sans-serif;}.content table:not(.table-display){border-spacing:0 5px;}.content td{vertical-align:top;}.content td:first-child{padding-right:20px;}.content td p:first-child{margin-top:0;}.content td h1,.content td h2{margin-left:0;font-size:1.1em;}.content tr:first-child td{border-top:0;}.docblock table{margin:.5em 0;width:calc(100% - 2px);border:1px dashed;}.docblock table td{padding:.5em;border:1px dashed;}.docblock table 
th{padding:.5em;text-align:left;border:1px solid;}.fields+table{margin-bottom:1em;}.content .item-list{list-style-type:none;padding:0;}.content .multi-column{-moz-column-count:5;-moz-column-gap:2.5em;-webkit-column-count:5;-webkit-column-gap:2.5em;column-count:5;column-gap:2.5em;}.content .multi-column li{width:100%;display:inline-block;}.content .method{font-size:1em;position:relative;}.content .method .where,.content .fn .where,.content .where.fmt-newline{display:block;font-size:0.8em;}.content .methods>div{margin-left:40px;margin-bottom:15px;}.content .docblock>.impl-items{margin-left:20px;margin-top:-34px;}.content .docblock>.impl-items>h4{border-bottom:0;}.content .docblock>.impl-items .table-display{margin:0;}.content .docblock>.impl-items table td{padding:0;}.toggle-wrapper.marg-left>.collapse-toggle{left:-24px;}.content .docblock>.impl-items .table-display,.impl-items table td{border:none;}.content .stability code{font-size:90%;}.content .stability{position:relative;margin-left:33px;margin-top:-13px;}.sub-variant>div>.stability{margin-top:initial;}.content .stability::before{content:'⬑';font-size:25px;position:absolute;top:-6px;left:-19px;}.content .impl-items .method,.content .impl-items>.type,.impl-items>.associatedconstant{margin-left:20px;}.content .impl-items .docblock,.content .impl-items .stability{margin-bottom:.6em;}.content .impl-items>.stability{margin-left:40px;}.methods>.stability,.content .impl-items>.stability{margin-top:-8px;}.impl-items{flex-basis:100%;}#main>.stability{margin-top:0;}nav:not(.sidebar){border-bottom:1px solid;padding-bottom:10px;margin-bottom:10px;}nav.main{padding:20px 0;text-align:center;}nav.main .current{border-top:1px solid;border-bottom:1px solid;}nav.main .separator{border:1px solid;display:inline-block;height:23px;margin:0 20px;}nav.sum{text-align:right;}nav.sub form{display:inline;}nav.sub,.content{margin-left:230px;}a{text-decoration:none;background:transparent;}.small-section-header:hover>.anchor{display:initial;}.in-band:hover>.anchor,.impl:hover>.anchor{display:inline-block;position:absolute;}.anchor{display:none;position:absolute;left:-7px;}.anchor.field{left:-5px;}.small-section-header>.anchor{left:-28px;padding-right:10px;}.anchor:before{content:'\2002\00a7\2002';}.docblock a:not(.srclink):not(.test-arrow):hover,.docblock-short a:not(.srclink):not(.test-arrow):hover,.stability a{text-decoration:underline;}.invisible>.srclink,h4>code+.srclink{position:absolute;top:0;right:0;font-size:17px;font-weight:normal;}.block a.current.crate{font-weight:500;}.search-container{position:relative;}.search-container>div{display:inline-flex;width:calc(100% - 34px);}#crate-search{margin-top:5px;padding:6px;padding-right:19px;flex:none;border:0;border-right:0;border-radius:4px 0 0 4px;outline:none;cursor:pointer;border-right:1px solid;-moz-appearance:none;-webkit-appearance:none;text-indent:0.01px;text-overflow:"";background-repeat:no-repeat;background-color:transparent;background-size:20px;background-position:calc(100% - 1px) 56%;}.search-container>.top-button{position:absolute;right:0;top:10px;}.search-input{-moz-box-sizing:border-box !important;box-sizing:border-box !important;outline:none;border:none;border-radius:1px;margin-top:5px;padding:10px 16px;font-size:17px;transition:border-color 300ms ease;transition:border-radius 300ms ease-in-out;transition:box-shadow 300ms ease-in-out;width:100%;}#crate-search+.search-input{border-radius:0 1px 1px 0;width:calc(100% - 32px);}.search-input:focus{border-radius:2px;border:0;outline:0;}.search-results 
.desc{white-space:nowrap;text-overflow:ellipsis;overflow:hidden;display:block;}.search-results a{display:block;}.content .search-results td:first-child{padding-right:0;width:50%;}.content .search-results td:first-child a{padding-right:10px;}.content .search-results td:first-child a:after{clear:both;content:"";display:block;}.content .search-results td:first-child a span{float:left;}tr.result span.primitive::after{content:' (primitive type)';font-style:italic;}tr.result span.keyword::after{content:' (keyword)';font-style:italic;}body.blur>:not(#help){filter:blur(8px);-webkit-filter:blur(8px);opacity:.7;}#help{width:100%;height:100vh;position:fixed;top:0;left:0;display:flex;justify-content:center;align-items:center;}#help>div{flex:0 0 auto;box-shadow:0 0 6px rgba(0,0,0,.2);width:550px;height:auto;border:1px solid;}#help dt{float:left;clear:left;display:block;}#help dd{margin:5px 35px;}#help .infos{padding-left:0;}#help h1,#help h2{margin-top:0;}#help>div div{width:50%;float:left;padding:20px;padding-left:17px;}.stab{display:table;border-width:1px;border-style:solid;padding:3px;margin-bottom:5px;font-size:90%;}.stab p{display:inline;}.stab summary{display:list-item;}.stab .emoji{font-size:1.5em;}.module-item .stab{border-radius:3px;display:inline-block;font-size:80%;line-height:1.2;margin-bottom:0;margin-right:.3em;padding:2px;vertical-align:text-bottom;}.module-item.unstable{opacity:0.65;}.since{font-weight:normal;font-size:initial;position:absolute;right:0;top:0;}.impl-items .since,.impl .since{flex-grow:0;padding-left:12px;padding-right:2px;position:initial;}.impl-items .srclink,.impl .srclink{flex-grow:0;font-size:17px;font-weight:normal;}.impl-items code,.impl code{flex-grow:1;}.impl-items h4,h4.impl,h3.impl{display:flex;flex-basis:100%;font-size:16px;margin-bottom:12px;justify-content:space-between;}.variants_table{width:100%;}.variants_table tbody tr td:first-child{width:1%;}td.summary-column{width:100%;}.summary{padding-right:0px;}pre.rust .question-mark{font-weight:bold;}a.test-arrow{display:inline-block;position:absolute;padding:5px 10px 5px 10px;border-radius:5px;font-size:130%;top:5px;right:5px;z-index:1;}a.test-arrow:hover{text-decoration:none;}.section-header:hover a:before{position:absolute;left:-25px;padding-right:10px;content:'\2002\00a7\2002';}.section-header:hover a{text-decoration:none;}.section-header a{color:inherit;}.collapse-toggle{font-weight:300;position:absolute;left:-23px;top:0;}h3>.collapse-toggle,h4>.collapse-toggle{font-size:0.8em;top:5px;}.toggle-wrapper>.collapse-toggle{left:-24px;margin-top:0px;}.toggle-wrapper{position:relative;margin-top:0;}.toggle-wrapper.collapsed{height:25px;transition:height .2s;margin-bottom:.6em;}.collapse-toggle>.inner{display:inline-block;width:1.2ch;text-align:center;}.collapse-toggle.hidden-default{position:relative;margin-left:20px;}.since+.srclink{display:table-cell;padding-left:10px;}.item-spacer{width:100%;height:12px;}.out-of-band>span.since{position:initial;font-size:20px;margin-right:5px;}.toggle-wrapper>.collapse-toggle{left:0;}.variant+.toggle-wrapper+.docblock>p{margin-top:5px;}.sub-variant,.sub-variant>h3{margin-top:1px 
!important;}#main>.sub-variant>h3{font-size:15px;margin-left:25px;margin-bottom:5px;}.sub-variant>div{margin-left:20px;margin-bottom:10px;}.sub-variant>div>span{display:block;position:relative;}.toggle-label{display:inline-block;margin-left:4px;margin-top:3px;}.enum>.toggle-wrapper+.docblock,.struct>.toggle-wrapper+.docblock{margin-left:30px;margin-bottom:20px;margin-top:5px;}.docblock>.section-header:first-child{margin-left:15px;margin-top:0;}.docblock>.section-header:first-child:hover>a:before{left:-10px;}.enum>.collapsed,.struct>.collapsed{margin-bottom:25px;}#main>.variant,#main>.structfield{display:block;}.attributes{display:block;margin-top:0px !important;margin-right:0px;margin-bottom:0px !important;margin-left:30px;}.toggle-attributes.collapsed{margin-bottom:0;}.impl-items>.toggle-attributes{margin-left:20px;}.impl-items .attributes{font-weight:500;}:target>code{opacity:1;}.information{position:absolute;left:-25px;margin-top:7px;z-index:1;}.tooltip{position:relative;display:inline-block;cursor:pointer;}.tooltip .tooltiptext{width:120px;display:none;text-align:center;padding:5px 3px 3px 3px;border-radius:6px;margin-left:5px;top:-5px;left:105%;z-index:10;font-size:16px;}.tooltip:hover .tooltiptext{display:inline;}.tooltip .tooltiptext::after{content:" ";position:absolute;top:50%;left:16px;margin-top:-5px;border-width:5px;border-style:solid;}.tooltip.compile_fail,.tooltip.should_panic,.tooltip.ignore{font-weight:bold;font-size:20px;}.tooltip .tooltiptext{border:1px solid;font-weight:normal;}pre.rust{position:relative;tab-size:4;-moz-tab-size:4;}.search-failed{text-align:center;margin-top:20px;}.search-failed>ul{text-align:left;max-width:570px;margin-left:auto;margin-right:auto;}#titles{height:35px;}#titles>div{float:left;width:33.3%;text-align:center;font-size:18px;cursor:pointer;border-top:2px solid;}#titles>div:not(:last-child){margin-right:1px;width:calc(33.3% - 1px);}#titles>div>div.count{display:inline-block;font-size:16px;}#all-types{text-align:center;border:1px solid;margin:0 10px;margin-bottom:10px;display:block;border-radius:7px;}#all-types>p{margin:5px 0;}#sidebar-toggle{position:fixed;top:30px;left:300px;z-index:10;padding:3px;border-top-right-radius:3px;border-bottom-right-radius:3px;cursor:pointer;font-weight:bold;transition:left .5s;font-size:1.2em;border:1px solid;border-left:0;}#source-sidebar{position:fixed;top:0;bottom:0;left:0;width:300px;z-index:1;overflow:auto;transition:left .5s;border-right:1px solid;}#source-sidebar>.title{font-size:1.5em;text-align:center;border-bottom:1px solid;margin-bottom:6px;}.theme-picker{position:absolute;left:211px;top:19px;}.theme-picker button{outline:none;}#settings-menu{position:absolute;right:0;top:10px;outline:none;}#theme-picker,#settings-menu{padding:4px;width:27px;height:29px;border:1px solid;border-radius:3px;cursor:pointer;}#theme-choices{display:none;position:absolute;left:0;top:28px;border:1px solid;border-radius:3px;z-index:1;cursor:pointer;}#theme-choices>button{border:none;width:100%;padding:4px;text-align:center;background:rgba(0,0,0,0);}#theme-choices>button:not(:first-child){border-top:1px solid;}@media (max-width:700px){body{padding-top:0px;}.rustdoc>.sidebar{height:45px;min-height:40px;margin:0;margin-left:-15px;padding:0 15px;position:static;z-index:11;}.sidebar>.location{float:right;margin:0px;margin-top:2px;padding:3px 10px 1px 10px;min-height:39px;background:inherit;text-align:left;font-size:24px;}.sidebar .location:empty{padding:0;}.sidebar 
.logo-container{width:35px;height:35px;margin-top:5px;margin-bottom:5px;float:left;margin-left:50px;}.sidebar .logo-container>img{max-width:35px;max-height:35px;}.sidebar-menu{position:fixed;z-index:10;font-size:2rem;cursor:pointer;width:45px;left:0;text-align:center;display:block;border-bottom:1px solid;border-right:1px solid;height:45px;}.rustdoc.source>.sidebar>.sidebar-menu{display:none;}.sidebar-elems{position:fixed;z-index:1;left:0;top:45px;bottom:0;overflow-y:auto;border-right:1px solid;display:none;}.sidebar>.block.version{border-bottom:none;margin-top:12px;}nav.sub{width:calc(100% - 32px);float:right;}.content{margin-left:0px;}#main{margin-top:45px;padding:0;}.content .in-band{width:100%;}.content h4>.out-of-band{position:inherit;}.toggle-wrapper>.collapse-toggle{left:0px;}.toggle-wrapper{height:1.5em;}#search{margin-left:0;}.content .impl-items .method,.content .impl-items>.type,.impl-items>.associatedconstant{display:flex;}.anchor{display:none !important;}h1.fqn{overflow:initial;}.theme-picker{left:10px;top:54px;z-index:1;}#titles>div>div.count{float:left;width:100%;}#titles{height:50px;}.sidebar.mobile{position:fixed;width:100%;margin-left:0;background-color:rgba(0,0,0,0);height:100%;}.sidebar{width:calc(100% + 30px);}.show-it{display:block;width:246px;}.show-it>.block.items{margin:8px 0;}.show-it>.block.items>ul{margin:0;}.show-it>.block.items>ul>li{text-align:center;margin:2px 0;}.show-it>.block.items>ul>li>a{font-size:21px;}#sidebar-filler{position:fixed;left:45px;width:calc(100% - 45px);top:0;height:45px;z-index:-1;border-bottom:1px solid;}.collapse-toggle{left:-20px;}.impl>.collapse-toggle{left:-10px;}#all-types{margin:10px;}#sidebar-toggle{top:100px;width:30px;font-size:1.5rem;text-align:center;padding:0;}#source-sidebar{z-index:11;}#main>.line-numbers{margin-top:0;}}@media print{nav.sub,.content .out-of-band,.collapse-toggle{display:none;}}@media (max-width:416px){#titles{height:73px;}#titles>div{height:73px;}}h3.important{margin:0;margin-bottom:13px;font-size:19px;}kbd{display:inline-block;padding:3px 5px;font:15px monospace;line-height:10px;vertical-align:middle;border:solid 1px;border-radius:3px;box-shadow:inset 0 -1px 0;cursor:default;}.hidden-by-impl-hider,.hidden-by-usual-hider{display:none !important;}#implementations-list>h3>span.in-band{width:100%;}.table-display{width:100%;border:0;border-collapse:collapse;border-spacing:0;font-size:16px;}.table-display tr td:first-child{padding-right:0;}.table-display tr td:last-child{float:right;}.table-display .out-of-band{position:relative;font-size:19px;display:block;}#implementors-list>.impl-items .table-display .out-of-band{font-size:17px;}.table-display td:hover .anchor{display:block;top:2px;left:-5px;}#main>ul{padding-left:10px;}#main>ul>li{list-style:none;}.non-exhaustive{margin-bottom:1em;}div.children{padding-left:27px;display:none;}div.name{cursor:pointer;position:relative;margin-left:16px;}div.files>a{display:block;padding:0 3px;}div.files>a:hover,div.name:hover{background-color:#a14b4b;}div.name.expand+.children{display:block;}div.name::before{content:"\25B6";padding-left:4px;font-size:0.7em;position:absolute;left:-16px;top:4px;}div.name.expand::before{transform:rotate(90deg);left:-15px;top:2px;}.type-decl>pre>.toggle-wrapper.toggle-attributes.top-attr{margin-left:0 !important;}.type-decl>pre>.docblock.attributes.top-attr{margin-left:1.8em !important;}.type-decl>pre>.toggle-attributes{margin-left:2.2em;}.type-decl>pre>.docblock.attributes{margin-left:4em;} \ No newline at end of file diff --git a/search-index.js 
b/search-index.js new file mode 100644 index 000000000..b4fd13dad --- /dev/null +++ b/search-index.js @@ -0,0 +1,6 @@ +var searchIndex = JSON.parse('{\ +"cfg_if":{"doc":"A macro for defining `#[cfg]` if-else statements.","i":[[14,"cfg_if","cfg_if","The main macro provided by this crate. See crate…",null,null]],"p":[]},\ +"libm":{"doc":"libm in pure Rust","i":[[5,"acos","libm","Arccosine (f64)",null,[[]]],[5,"acosf","","Arccosine (f32)",null,[[]]],[5,"acosh","","Inverse hyperbolic cosine (f64)",null,[[]]],[5,"acoshf","","Inverse hyperbolic cosine (f32)",null,[[]]],[5,"asin","","Arcsine (f64)",null,[[]]],[5,"asinf","","Arcsine (f32)",null,[[]]],[5,"asinh","","Inverse hyperbolic sine (f64)",null,[[]]],[5,"asinhf","","Inverse hyperbolic sine (f32)",null,[[]]],[5,"atan","","Arctangent (f64)",null,[[]]],[5,"atan2","","Arctangent of y/x (f64)",null,[[]]],[5,"atan2f","","Arctangent of y/x (f32)",null,[[]]],[5,"atanf","","Arctangent (f32)",null,[[]]],[5,"atanh","","Inverse hyperbolic tangent (f64)",null,[[]]],[5,"atanhf","","Inverse hyperbolic tangent (f32)",null,[[]]],[5,"cbrt","","Computes the cube root of the argument.",null,[[]]],[5,"cbrtf","","Cube root (f32)",null,[[]]],[5,"ceil","","Ceil (f64)",null,[[]]],[5,"ceilf","","Ceil (f32)",null,[[]]],[5,"copysign","","Sign of Y, magnitude of X (f64)",null,[[]]],[5,"copysignf","","Sign of Y, magnitude of X (f32)",null,[[]]],[5,"cos","","",null,[[]]],[5,"cosf","","",null,[[]]],[5,"cosh","","Hyperbolic cosine (f64)",null,[[]]],[5,"coshf","","Hyperbolic cosine (f32)",null,[[]]],[5,"erf","","Error function (f64)",null,[[]]],[5,"erfc","","Complementary error function (f64)",null,[[]]],[5,"erfcf","","Complementary error function (f32)",null,[[]]],[5,"erff","","Error function (f32)",null,[[]]],[5,"exp","","Exponential, base e (f64)",null,[[]]],[5,"exp10","","",null,[[]]],[5,"exp10f","","",null,[[]]],[5,"exp2","","Exponential, base 2 (f64)",null,[[]]],[5,"exp2f","","Exponential, base 2 (f32)",null,[[]]],[5,"expf","","Exponential, base e (f32)",null,[[]]],[5,"expm1","","Exponential, base e, of x-1 (f64)",null,[[]]],[5,"expm1f","","Exponential, base e, of x-1 (f32)",null,[[]]],[5,"fabs","","Absolute value (magnitude) (f64) Calculates the absolute…",null,[[]]],[5,"fabsf","","Absolute value (magnitude) (f32) Calculates the absolute…",null,[[]]],[5,"fdim","","Positive difference (f64)",null,[[]]],[5,"fdimf","","Positive difference (f32)",null,[[]]],[5,"floor","","Floor (f64)",null,[[]]],[5,"floorf","","Floor (f32)",null,[[]]],[5,"fma","","Floating multiply add (f64)",null,[[]]],[5,"fmaf","","Floating multiply add
(f32)",null,[[]]],[5,"fmax","","",null,[[]]],[5,"fmaxf","","",null,[[]]],[5,"fmin","","",null,[[]]],[5,"fminf","","",null,[[]]],[5,"fmod","","",null,[[]]],[5,"fmodf","","",null,[[]]],[5,"frexp","","",null,[[]]],[5,"frexpf","","",null,[[]]],[5,"hypot","","",null,[[]]],[5,"hypotf","","",null,[[]]],[5,"ilogb","","",null,[[]]],[5,"ilogbf","","",null,[[]]],[5,"j0","","",null,[[]]],[5,"y0","","",null,[[]]],[5,"j0f","","",null,[[]]],[5,"y0f","","",null,[[]]],[5,"j1","","",null,[[]]],[5,"y1","","",null,[[]]],[5,"j1f","","",null,[[]]],[5,"y1f","","",null,[[]]],[5,"jn","","",null,[[]]],[5,"yn","","",null,[[]]],[5,"jnf","","",null,[[]]],[5,"ynf","","",null,[[]]],[5,"ldexp","","",null,[[]]],[5,"ldexpf","","",null,[[]]],[5,"lgamma","","",null,[[]]],[5,"lgamma_r","","",null,[[]]],[5,"lgammaf","","",null,[[]]],[5,"lgammaf_r","","",null,[[]]],[5,"log","","",null,[[]]],[5,"log10","","",null,[[]]],[5,"log10f","","",null,[[]]],[5,"log1p","","",null,[[]]],[5,"log1pf","","",null,[[]]],[5,"log2","","",null,[[]]],[5,"log2f","","",null,[[]]],[5,"logf","","",null,[[]]],[5,"modf","","",null,[[]]],[5,"modff","","",null,[[]]],[5,"pow","","",null,[[]]],[5,"powf","","",null,[[]]],[5,"remquo","","",null,[[]]],[5,"remquof","","",null,[[]]],[5,"round","","",null,[[]]],[5,"roundf","","",null,[[]]],[5,"scalbn","","",null,[[]]],[5,"scalbnf","","",null,[[]]],[5,"sin","","",null,[[]]],[5,"sincos","","",null,[[]]],[5,"sincosf","","",null,[[]]],[5,"sinf","","",null,[[]]],[5,"sinh","","",null,[[]]],[5,"sinhf","","",null,[[]]],[5,"sqrt","","",null,[[]]],[5,"sqrtf","","",null,[[]]],[5,"tan","","",null,[[]]],[5,"tanf","","",null,[[]]],[5,"tanh","","",null,[[]]],[5,"tanhf","","",null,[[]]],[5,"tgamma","","",null,[[]]],[5,"tgammaf","","",null,[[]]],[5,"trunc","","",null,[[]]],[5,"truncf","","",null,[[]]],[8,"F32Ext","","Math support for `f32`",null,null],[10,"floor","","",0,[[]]],[10,"ceil","","",0,[[]]],[10,"round","","",0,[[]]],[10,"trunc","","",0,[[]]],[10,"fdim","","",0,[[]]],[10,"fract","","",0,[[]]],[10,"abs","","",0,[[]]],[10,"mul_add","","",0,[[]]],[10,"div_euc","","",0,[[]]],[10,"mod_euc","","",0,[[]]],[10,"powf","","",0,[[]]],[10,"sqrt","","",0,[[]]],[10,"exp","","",0,[[]]],[10,"exp2","","",0,[[]]],[10,"ln","","",0,[[]]],[10,"log","","",0,[[]]],[10,"log2","","",0,[[]]],[10,"log10","","",0,[[]]],[10,"cbrt","","",0,[[]]],[10,"hypot","","",0,[[]]],[10,"sin","","",0,[[]]],[10,"cos","","",0,[[]]],[10,"tan","","",0,[[]]],[10,"asin","","",0,[[]]],[10,"acos","","",0,[[]]],[10,"atan","","",0,[[]]],[10,"atan2","","",0,[[]]],[10,"sin_cos","","",0,[[]]],[10,"exp_m1","","",0,[[]]],[10,"ln_1p","","",0,[[]]],[10,"sinh","","",0,[[]]],[10,"cosh","","",0,[[]]],[10,"tanh","","",0,[[]]],[10,"asinh","","",0,[[]]],[10,"acosh","","",0,[[]]],[10,"atanh","","",0,[[]]],[10,"min","","",0,[[]]],[10,"max","","",0,[[]]],[8,"F64Ext","","Math support for 
`f64`",null,null],[10,"floor","","",1,[[]]],[10,"ceil","","",1,[[]]],[10,"round","","",1,[[]]],[10,"trunc","","",1,[[]]],[10,"fdim","","",1,[[]]],[10,"fract","","",1,[[]]],[10,"abs","","",1,[[]]],[10,"mul_add","","",1,[[]]],[10,"div_euc","","",1,[[]]],[10,"mod_euc","","",1,[[]]],[10,"powf","","",1,[[]]],[10,"sqrt","","",1,[[]]],[10,"exp","","",1,[[]]],[10,"exp2","","",1,[[]]],[10,"ln","","",1,[[]]],[10,"log","","",1,[[]]],[10,"log2","","",1,[[]]],[10,"log10","","",1,[[]]],[10,"cbrt","","",1,[[]]],[10,"hypot","","",1,[[]]],[10,"sin","","",1,[[]]],[10,"cos","","",1,[[]]],[10,"tan","","",1,[[]]],[10,"asin","","",1,[[]]],[10,"acos","","",1,[[]]],[10,"atan","","",1,[[]]],[10,"atan2","","",1,[[]]],[10,"sin_cos","","",1,[[]]],[10,"exp_m1","","",1,[[]]],[10,"ln_1p","","",1,[[]]],[10,"sinh","","",1,[[]]],[10,"cosh","","",1,[[]]],[10,"tanh","","",1,[[]]],[10,"asinh","","",1,[[]]],[10,"acosh","","",1,[[]]],[10,"atanh","","",1,[[]]],[10,"min","","",1,[[]]],[10,"max","","",1,[[]]]],"p":[[8,"F32Ext"],[8,"F64Ext"]]},\ +"packed_simd":{"doc":"Portable packed SIMD vectors","i":[[3,"Simd","packed_simd","Packed SIMD vector type.",null,null],[3,"LexicographicallyOrdered","","Wrapper over `T` implementing a lexicoraphical order via…",null,null],[3,"m8","","8-bit wide mask.",null,null],[3,"m16","","16-bit wide mask.",null,null],[3,"m32","","32-bit wide mask.",null,null],[3,"m64","","64-bit wide mask.",null,null],[3,"m128","","128-bit wide mask.",null,null],[3,"msize","","isize-wide mask.",null,null],[11,"new","","Instantiate a mask with `value`",0,[[]]],[11,"test","","Test if the mask is set",0,[[]]],[11,"new","","Instantiate a mask with `value`",1,[[]]],[11,"test","","Test if the mask is set",1,[[]]],[11,"new","","Instantiate a mask with `value`",2,[[]]],[11,"test","","Test if the mask is set",2,[[]]],[11,"new","","Instantiate a mask with `value`",3,[[]]],[11,"test","","Test if the mask is set",3,[[]]],[11,"new","","Instantiate a mask with `value`",4,[[]]],[11,"test","","Test if the mask is set",4,[[]]],[11,"new","","Instantiate a mask with `value`",5,[[]]],[11,"test","","Test if the mask is set",5,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i8x2",6]],["i8x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i8x2",6]],["i8x2",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the 
`slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["i8x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["i8x2",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u8x2",6]],["u8x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u8x2",6]],["u8x2",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector 
elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["u8x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["u8x2",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x2",6]]],[11,"le","","Lane-wise 
less-than-or-equals comparison.",6,[[],["m8x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x2",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["m8x2",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["m8x2",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i8x4",6]],["i8x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i8x4",6]],["i8x4",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the 
binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["i8x4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["i8x4",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u8x4",6]],["u8x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u8x4",6]],["u8x4",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary 
representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["u8x4",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["u8x4",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x4",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["m8x4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["m8x4",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i16x2",6]],["i16x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the 
specified…",6,[[["i16x2",6]],["i16x2",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m16x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m16x2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m16x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m16x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m16x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m16x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["i16x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["i16x2",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits 
of each lane to the left by the specified…",6,[[["u16x2",6]],["u16x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u16x2",6]],["u16x2",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m16x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m16x2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m16x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m16x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m16x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m16x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["u16x2",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["u16x2",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at 
`index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m16x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m16x2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m16x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m16x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m16x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m16x2",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["m16x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["m16x2",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i8x8",6]],["i8x8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i8x8",6]],["i8x8",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the 
byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["i8x8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["i8x8",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u8x8",6]],["u8x8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u8x8",6]],["u8x8",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the 
`slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["u8x8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["u8x8",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x8",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["m8x8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["m8x8",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector 
elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i16x4",6]],["i16x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i16x4",6]],["i16x4",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m16x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m16x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m16x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m16x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m16x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m16x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["i16x4",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements 
`Ord`.",6,[[],[["i16x4",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u16x4",6]],["u16x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u16x4",6]],["u16x4",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m16x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m16x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m16x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m16x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m16x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m16x4",6]]],[11,"partial_lex_ord","","Returns 
a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["u16x4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["u16x4",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m16x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m16x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m16x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m16x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m16x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m16x4",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["m16x4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["m16x4",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i32x2",6]],["i32x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i32x2",6]],["i32x2",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the 
`slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m32x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m32x2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m32x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m32x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m32x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m32x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["i32x2",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["i32x2",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u32x2",6]],["u32x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u32x2",6]],["u32x2",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector 
elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m32x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m32x2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m32x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m32x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m32x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m32x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["u32x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["u32x2",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m32x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m32x2",6]]],[11,"lt","","Lane-wise less-than 
comparison.",6,[[],["m32x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m32x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m32x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m32x2",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["m32x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["m32x2",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"sum","","Horizontal sum of the vector elements.",6,[[]]],[11,"product","","Horizontal product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[18,"EPSILON","","Machine epsilon value.",6,null],[18,"MIN","","Smallest finite value.",6,null],[18,"MIN_POSITIVE","","Smallest positive normal value.",6,null],[18,"MAX","","Largest finite value.",6,null],[18,"NAN","","Not a Number (NaN).",6,null],[18,"INFINITY","","Infinity (∞).",6,null],[18,"NEG_INFINITY","","Negative infinity (-∞).",6,null],[18,"PI","","Archimedes\' constant (π)",6,null],[18,"FRAC_PI_2","","π/2",6,null],[18,"FRAC_PI_3","","π/3",6,null],[18,"FRAC_PI_4","","π/4",6,null],[18,"FRAC_PI_6","","π/6",6,null],[18,"FRAC_PI_8","","π/8",6,null],[18,"FRAC_1_PI","","1/π",6,null],[18,"FRAC_2_PI","","2/π",6,null],[18,"FRAC_2_SQRT_PI","","2/sqrt(π)",6,null],[18,"SQRT_2","","sqrt(2)",6,null],[18,"FRAC_1_SQRT_2","","1/sqrt(2)",6,null],[18,"E","","Euler\'s number (e)",6,null],[18,"LOG2_E","","log2(e)",6,null],[18,"LOG10_E","","log10(e)",6,null],[18,"LN_2","","ln(2)",6,null],[18,"LN_10","","ln(10)",6,null],[11,"is_nan","","",6,[[],["m32x2",6]]],[11,"is_infinite","","",6,[[],["m32x2",6]]],[11,"is_finite","","",6,[[],["m32x2",6]]],[11,"abs","","Absolute 
value.",6,[[]]],[11,"cos","","Cosine.",6,[[]]],[11,"cos_pi","","Cosine of `self * PI`.",6,[[]]],[11,"exp","","Returns the exponential function of `self`: `e^(self)`.",6,[[]]],[11,"ln","","Returns the natural logarithm of `self`.",6,[[]]],[11,"mul_add","","Fused multiply add: `self * y + z`",6,[[]]],[11,"mul_adde","","Fused multiply add estimate: ~= `self * y + z`",6,[[]]],[11,"powf","","Raises `self` number to the floating point power of `x`.",6,[[]]],[11,"recpre","","Reciprocal estimate: `~= 1. / self`.",6,[[]]],[11,"rsqrte","","Reciprocal square-root estimate: `~= 1. / self.sqrt()`.",6,[[]]],[11,"sin","","Sine.",6,[[]]],[11,"sin_pi","","Sine of `self * PI`.",6,[[]]],[11,"sin_cos_pi","","Sine and cosine of `self * PI`.",6,[[]]],[11,"sqrt","","",6,[[]]],[11,"sqrte","","Square-root estimate.",6,[[]]],[11,"tanh","","Tanh.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m32x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m32x2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m32x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m32x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m32x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m32x2",6]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i8x16",6]],["i8x16",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i8x16",6]],["i8x16",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s 
endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x16",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x16",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x16",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["i8x16",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["i8x16",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u8x16",6]],["u8x16",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u8x16",6]],["u8x16",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the 
`slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x16",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x16",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x16",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["u8x16",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["u8x16",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x16",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x16",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x16",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["m8x16",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["m8x16",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a 
new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i16x8",6]],["i16x8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i16x8",6]],["i16x8",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m16x8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m16x8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m16x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m16x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m16x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m16x8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["i16x8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["i16x8",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector 
lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u16x8",6]],["u16x8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u16x8",6]],["u16x8",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m16x8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m16x8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m16x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m16x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m16x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m16x8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["u16x8",6]]]],[11,"lex_ord","","Returns a 
wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["u16x8",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m16x8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m16x8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m16x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m16x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m16x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m16x8",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["m16x8",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["m16x8",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i32x4",6]],["i32x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i32x4",6]],["i32x4",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new 
vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m32x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m32x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m32x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m32x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m32x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m32x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["i32x4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["i32x4",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u32x4",6]],["u32x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u32x4",6]],["u32x4",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the 
`slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m32x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m32x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m32x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m32x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m32x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m32x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["u32x4",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["u32x4",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"sum","","Horizontal sum of the vector elements.",6,[[]]],[11,"product","","Horizontal product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the 
`slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[18,"EPSILON","","Machine epsilon value.",6,null],[18,"MIN","","Smallest finite value.",6,null],[18,"MIN_POSITIVE","","Smallest positive normal value.",6,null],[18,"MAX","","Largest finite value.",6,null],[18,"NAN","","Not a Number (NaN).",6,null],[18,"INFINITY","","Infinity (∞).",6,null],[18,"NEG_INFINITY","","Negative infinity (-∞).",6,null],[18,"PI","","Archimedes\' constant (π)",6,null],[18,"FRAC_PI_2","","π/2",6,null],[18,"FRAC_PI_3","","π/3",6,null],[18,"FRAC_PI_4","","π/4",6,null],[18,"FRAC_PI_6","","π/6",6,null],[18,"FRAC_PI_8","","π/8",6,null],[18,"FRAC_1_PI","","1/π",6,null],[18,"FRAC_2_PI","","2/π",6,null],[18,"FRAC_2_SQRT_PI","","2/sqrt(π)",6,null],[18,"SQRT_2","","sqrt(2)",6,null],[18,"FRAC_1_SQRT_2","","1/sqrt(2)",6,null],[18,"E","","Euler\'s number (e)",6,null],[18,"LOG2_E","","log2(e)",6,null],[18,"LOG10_E","","log10(e)",6,null],[18,"LN_2","","ln(2)",6,null],[18,"LN_10","","ln(10)",6,null],[11,"is_nan","","",6,[[],["m32x4",6]]],[11,"is_infinite","","",6,[[],["m32x4",6]]],[11,"is_finite","","",6,[[],["m32x4",6]]],[11,"abs","","Absolute value.",6,[[]]],[11,"cos","","Cosine.",6,[[]]],[11,"cos_pi","","Cosine of `self * PI`.",6,[[]]],[11,"exp","","Returns the exponential function of `self`: `e^(self)`.",6,[[]]],[11,"ln","","Returns the natural logarithm of `self`.",6,[[]]],[11,"mul_add","","Fused multiply add: `self * y + z`",6,[[]]],[11,"mul_adde","","Fused multiply add estimate: ~= `self * y + z`",6,[[]]],[11,"powf","","Raises `self` number to the floating point power of `x`.",6,[[]]],[11,"recpre","","Reciprocal estimate: `~= 1. / self`.",6,[[]]],[11,"rsqrte","","Reciprocal square-root estimate: `~= 1. 
/ self.sqrt()`.",6,[[]]],[11,"sin","","Sine.",6,[[]]],[11,"sin_pi","","Sine of `self * PI`.",6,[[]]],[11,"sin_cos_pi","","Sine and cosine of `self * PI`.",6,[[]]],[11,"sqrt","","",6,[[]]],[11,"sqrte","","Square-root estimate.",6,[[]]],[11,"tanh","","Tanh.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m32x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m32x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m32x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m32x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m32x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m32x4",6]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m32x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m32x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m32x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m32x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m32x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m32x4",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["m32x4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["m32x4",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i64x2",6]],["i64x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i64x2",6]],["i64x2",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element 
value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m64x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m64x2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m64x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m64x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m64x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m64x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["i64x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["i64x2",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u64x2",6]],["u64x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u64x2",6]],["u64x2",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector 
elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m64x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m64x2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m64x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m64x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m64x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m64x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["u64x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["u64x2",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"sum","","Horizontal sum of the vector elements.",6,[[]]],[11,"product","","Horizontal product of the vector 
elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[18,"EPSILON","","Machine epsilon value.",6,null],[18,"MIN","","Smallest finite value.",6,null],[18,"MIN_POSITIVE","","Smallest positive normal value.",6,null],[18,"MAX","","Largest finite value.",6,null],[18,"NAN","","Not a Number (NaN).",6,null],[18,"INFINITY","","Infinity (∞).",6,null],[18,"NEG_INFINITY","","Negative infinity (-∞).",6,null],[18,"PI","","Archimedes\' constant (π)",6,null],[18,"FRAC_PI_2","","π/2",6,null],[18,"FRAC_PI_3","","π/3",6,null],[18,"FRAC_PI_4","","π/4",6,null],[18,"FRAC_PI_6","","π/6",6,null],[18,"FRAC_PI_8","","π/8",6,null],[18,"FRAC_1_PI","","1/π",6,null],[18,"FRAC_2_PI","","2/π",6,null],[18,"FRAC_2_SQRT_PI","","2/sqrt(π)",6,null],[18,"SQRT_2","","sqrt(2)",6,null],[18,"FRAC_1_SQRT_2","","1/sqrt(2)",6,null],[18,"E","","Euler\'s number (e)",6,null],[18,"LOG2_E","","log2(e)",6,null],[18,"LOG10_E","","log10(e)",6,null],[18,"LN_2","","ln(2)",6,null],[18,"LN_10","","ln(10)",6,null],[11,"is_nan","","",6,[[],["m64x2",6]]],[11,"is_infinite","","",6,[[],["m64x2",6]]],[11,"is_finite","","",6,[[],["m64x2",6]]],[11,"abs","","Absolute value.",6,[[]]],[11,"cos","","Cosine.",6,[[]]],[11,"cos_pi","","Cosine of `self * PI`.",6,[[]]],[11,"exp","","Returns the exponential function of `self`: `e^(self)`.",6,[[]]],[11,"ln","","Returns the natural logarithm of `self`.",6,[[]]],[11,"mul_add","","Fused multiply add: `self * y + z`",6,[[]]],[11,"mul_adde","","Fused multiply add estimate: ~= `self * y + z`",6,[[]]],[11,"powf","","Raises `self` number to the floating point power of `x`.",6,[[]]],[11,"recpre","","Reciprocal estimate: `~= 1. / self`.",6,[[]]],[11,"rsqrte","","Reciprocal square-root estimate: `~= 1. 
/ self.sqrt()`.",6,[[]]],[11,"sin","","Sine.",6,[[]]],[11,"sin_pi","","Sine of `self * PI`.",6,[[]]],[11,"sin_cos_pi","","Sine and cosine of `self * PI`.",6,[[]]],[11,"sqrt","","",6,[[]]],[11,"sqrte","","Square-root estimate.",6,[[]]],[11,"tanh","","Tanh.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m64x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m64x2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m64x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m64x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m64x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m64x2",6]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m64x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m64x2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m64x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m64x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m64x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m64x2",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["m64x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["m64x2",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i128x1",6]],["i128x1",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i128x1",6]],["i128x1",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element 
value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m128x1",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m128x1",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m128x1",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m128x1",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m128x1",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m128x1",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["i128x1",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["i128x1",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u128x1",6]],["u128x1",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u128x1",6]],["u128x1",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the 
vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m128x1",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m128x1",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m128x1",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m128x1",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m128x1",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m128x1",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["u128x1",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["u128x1",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector 
elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m128x1",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m128x1",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m128x1",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m128x1",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m128x1",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m128x1",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["m128x1",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["m128x1",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i8x32",6]],["i8x32",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i8x32",6]],["i8x32",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the 
target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x32",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x32",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x32",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x32",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x32",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x32",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["i8x32",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["i8x32",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u8x32",6]],["u8x32",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u8x32",6]],["u8x32",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s 
endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x32",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x32",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x32",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x32",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x32",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x32",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["u8x32",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["u8x32",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x32",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x32",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x32",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x32",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x32",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x32",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["m8x32",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["m8x32",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the 
value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i16x16",6]],["i16x16",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i16x16",6]],["i16x16",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m16x16",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m16x16",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m16x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m16x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m16x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m16x16",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["i16x16",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["i16x16",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector 
lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u16x16",6]],["u16x16",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u16x16",6]],["u16x16",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m16x16",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m16x16",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m16x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m16x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m16x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m16x16",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["u16x16",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["u16x16",6]]]],[11,"bitmask","","Creates a bitmask 
with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m16x16",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m16x16",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m16x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m16x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m16x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m16x16",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["m16x16",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["m16x16",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i32x8",6]],["i32x8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i32x8",6]],["i32x8",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector 
with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m32x8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m32x8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m32x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m32x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m32x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m32x8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["i32x8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["i32x8",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u32x8",6]],["u32x8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u32x8",6]],["u32x8",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the 
`slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m32x8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m32x8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m32x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m32x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m32x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m32x8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["u32x8",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["u32x8",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"sum","","Horizontal sum of the vector elements.",6,[[]]],[11,"product","","Horizontal product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the 
`slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[18,"EPSILON","","Machine epsilon value.",6,null],[18,"MIN","","Smallest finite value.",6,null],[18,"MIN_POSITIVE","","Smallest positive normal value.",6,null],[18,"MAX","","Largest finite value.",6,null],[18,"NAN","","Not a Number (NaN).",6,null],[18,"INFINITY","","Infinity (∞).",6,null],[18,"NEG_INFINITY","","Negative infinity (-∞).",6,null],[18,"PI","","Archimedes\' constant (π)",6,null],[18,"FRAC_PI_2","","π/2",6,null],[18,"FRAC_PI_3","","π/3",6,null],[18,"FRAC_PI_4","","π/4",6,null],[18,"FRAC_PI_6","","π/6",6,null],[18,"FRAC_PI_8","","π/8",6,null],[18,"FRAC_1_PI","","1/π",6,null],[18,"FRAC_2_PI","","2/π",6,null],[18,"FRAC_2_SQRT_PI","","2/sqrt(π)",6,null],[18,"SQRT_2","","sqrt(2)",6,null],[18,"FRAC_1_SQRT_2","","1/sqrt(2)",6,null],[18,"E","","Euler\'s number (e)",6,null],[18,"LOG2_E","","log2(e)",6,null],[18,"LOG10_E","","log10(e)",6,null],[18,"LN_2","","ln(2)",6,null],[18,"LN_10","","ln(10)",6,null],[11,"is_nan","","",6,[[],["m32x8",6]]],[11,"is_infinite","","",6,[[],["m32x8",6]]],[11,"is_finite","","",6,[[],["m32x8",6]]],[11,"abs","","Absolute value.",6,[[]]],[11,"cos","","Cosine.",6,[[]]],[11,"cos_pi","","Cosine of `self * PI`.",6,[[]]],[11,"exp","","Returns the exponential function of `self`: `e^(self)`.",6,[[]]],[11,"ln","","Returns the natural logarithm of `self`.",6,[[]]],[11,"mul_add","","Fused multiply add: `self * y + z`",6,[[]]],[11,"mul_adde","","Fused multiply add estimate: ~= `self * y + z`",6,[[]]],[11,"powf","","Raises `self` number to the floating point power of `x`.",6,[[]]],[11,"recpre","","Reciprocal estimate: `~= 1. / self`.",6,[[]]],[11,"rsqrte","","Reciprocal square-root estimate: `~= 1. 
/ self.sqrt()`.",6,[[]]],[11,"sin","","Sine.",6,[[]]],[11,"sin_pi","","Sine of `self * PI`.",6,[[]]],[11,"sin_cos_pi","","Sine and cosine of `self * PI`.",6,[[]]],[11,"sqrt","","",6,[[]]],[11,"sqrte","","Square-root estimate.",6,[[]]],[11,"tanh","","Tanh.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m32x8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m32x8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m32x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m32x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m32x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m32x8",6]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m32x8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m32x8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m32x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m32x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m32x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m32x8",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["m32x8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["m32x8",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i64x4",6]],["i64x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i64x4",6]],["i64x4",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element 
value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m64x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m64x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m64x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m64x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m64x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m64x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["i64x4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["i64x4",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u64x4",6]],["u64x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u64x4",6]],["u64x4",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector 
elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m64x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m64x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m64x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m64x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m64x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m64x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["u64x4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["u64x4",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"sum","","Horizontal sum of the vector elements.",6,[[]]],[11,"product","","Horizontal product of the vector 
elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[18,"EPSILON","","Machine epsilon value.",6,null],[18,"MIN","","Smallest finite value.",6,null],[18,"MIN_POSITIVE","","Smallest positive normal value.",6,null],[18,"MAX","","Largest finite value.",6,null],[18,"NAN","","Not a Number (NaN).",6,null],[18,"INFINITY","","Infinity (∞).",6,null],[18,"NEG_INFINITY","","Negative infinity (-∞).",6,null],[18,"PI","","Archimedes\' constant (π)",6,null],[18,"FRAC_PI_2","","π/2",6,null],[18,"FRAC_PI_3","","π/3",6,null],[18,"FRAC_PI_4","","π/4",6,null],[18,"FRAC_PI_6","","π/6",6,null],[18,"FRAC_PI_8","","π/8",6,null],[18,"FRAC_1_PI","","1/π",6,null],[18,"FRAC_2_PI","","2/π",6,null],[18,"FRAC_2_SQRT_PI","","2/sqrt(π)",6,null],[18,"SQRT_2","","sqrt(2)",6,null],[18,"FRAC_1_SQRT_2","","1/sqrt(2)",6,null],[18,"E","","Euler\'s number (e)",6,null],[18,"LOG2_E","","log2(e)",6,null],[18,"LOG10_E","","log10(e)",6,null],[18,"LN_2","","ln(2)",6,null],[18,"LN_10","","ln(10)",6,null],[11,"is_nan","","",6,[[],["m64x4",6]]],[11,"is_infinite","","",6,[[],["m64x4",6]]],[11,"is_finite","","",6,[[],["m64x4",6]]],[11,"abs","","Absolute value.",6,[[]]],[11,"cos","","Cosine.",6,[[]]],[11,"cos_pi","","Cosine of `self * PI`.",6,[[]]],[11,"exp","","Returns the exponential function of `self`: `e^(self)`.",6,[[]]],[11,"ln","","Returns the natural logarithm of `self`.",6,[[]]],[11,"mul_add","","Fused multiply add: `self * y + z`",6,[[]]],[11,"mul_adde","","Fused multiply add estimate: ~= `self * y + z`",6,[[]]],[11,"powf","","Raises `self` number to the floating point power of `x`.",6,[[]]],[11,"recpre","","Reciprocal estimate: `~= 1. / self`.",6,[[]]],[11,"rsqrte","","Reciprocal square-root estimate: `~= 1. 
/ self.sqrt()`.",6,[[]]],[11,"sin","","Sine.",6,[[]]],[11,"sin_pi","","Sine of `self * PI`.",6,[[]]],[11,"sin_cos_pi","","Sine and cosine of `self * PI`.",6,[[]]],[11,"sqrt","","",6,[[]]],[11,"sqrte","","Square-root estimate.",6,[[]]],[11,"tanh","","Tanh.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m64x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m64x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m64x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m64x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m64x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m64x4",6]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m64x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m64x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m64x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m64x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m64x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m64x4",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["m64x4",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["m64x4",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i128x2",6]],["i128x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i128x2",6]],["i128x2",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element 
value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m128x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m128x2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m128x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m128x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m128x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m128x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["i128x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["i128x2",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u128x2",6]],["u128x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u128x2",6]],["u128x2",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the 
vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m128x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m128x2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m128x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m128x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m128x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m128x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["u128x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["u128x2",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector 
elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m128x2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m128x2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m128x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m128x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m128x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m128x2",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["m128x2",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["m128x2",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i8x64",6]],["i8x64",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i8x64",6]],["i8x64",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the 
target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x64",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x64",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x64",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x64",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x64",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x64",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["i8x64",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["i8x64",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u8x64",6]],["u8x64",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u8x64",6]],["u8x64",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s 
endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x64",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x64",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x64",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x64",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x64",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x64",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["u8x64",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["u8x64",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m8x64",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m8x64",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m8x64",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m8x64",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m8x64",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m8x64",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["m8x64",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["m8x64",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the 
value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i16x32",6]],["i16x32",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i16x32",6]],["i16x32",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m16x32",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m16x32",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m16x32",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m16x32",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m16x32",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m16x32",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["i16x32",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["i16x32",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector 
lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u16x32",6]],["u16x32",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u16x32",6]],["u16x32",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m16x32",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m16x32",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m16x32",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m16x32",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m16x32",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m16x32",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["u16x32",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["u16x32",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask 
with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m16x32",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m16x32",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m16x32",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m16x32",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m16x32",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m16x32",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["m16x32",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["m16x32",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i32x16",6]],["i32x16",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i32x16",6]],["i32x16",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new 
vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m32x16",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m32x16",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m32x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m32x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m32x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m32x16",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["i32x16",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["i32x16",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u32x16",6]],["u32x16",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u32x16",6]],["u32x16",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the 
`slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m32x16",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m32x16",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m32x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m32x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m32x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m32x16",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["u32x16",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["u32x16",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"sum","","Horizontal sum of the vector elements.",6,[[]]],[11,"product","","Horizontal product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the 
`slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[18,"EPSILON","","Machine epsilon value.",6,null],[18,"MIN","","Smallest finite value.",6,null],[18,"MIN_POSITIVE","","Smallest positive normal value.",6,null],[18,"MAX","","Largest finite value.",6,null],[18,"NAN","","Not a Number (NaN).",6,null],[18,"INFINITY","","Infinity (∞).",6,null],[18,"NEG_INFINITY","","Negative infinity (-∞).",6,null],[18,"PI","","Archimedes\' constant (π)",6,null],[18,"FRAC_PI_2","","π/2",6,null],[18,"FRAC_PI_3","","π/3",6,null],[18,"FRAC_PI_4","","π/4",6,null],[18,"FRAC_PI_6","","π/6",6,null],[18,"FRAC_PI_8","","π/8",6,null],[18,"FRAC_1_PI","","1/π",6,null],[18,"FRAC_2_PI","","2/π",6,null],[18,"FRAC_2_SQRT_PI","","2/sqrt(π)",6,null],[18,"SQRT_2","","sqrt(2)",6,null],[18,"FRAC_1_SQRT_2","","1/sqrt(2)",6,null],[18,"E","","Euler\'s number (e)",6,null],[18,"LOG2_E","","log2(e)",6,null],[18,"LOG10_E","","log10(e)",6,null],[18,"LN_2","","ln(2)",6,null],[18,"LN_10","","ln(10)",6,null],[11,"is_nan","","",6,[[],["m32x16",6]]],[11,"is_infinite","","",6,[[],["m32x16",6]]],[11,"is_finite","","",6,[[],["m32x16",6]]],[11,"abs","","Absolute value.",6,[[]]],[11,"cos","","Cosine.",6,[[]]],[11,"cos_pi","","Cosine of `self * PI`.",6,[[]]],[11,"exp","","Returns the exponential function of `self`: `e^(self)`.",6,[[]]],[11,"ln","","Returns the natural logarithm of `self`.",6,[[]]],[11,"mul_add","","Fused multiply add: `self * y + z`",6,[[]]],[11,"mul_adde","","Fused multiply add estimate: ~= `self * y + z`",6,[[]]],[11,"powf","","Raises `self` number to the floating point power of `x`.",6,[[]]],[11,"recpre","","Reciprocal estimate: `~= 1. / self`.",6,[[]]],[11,"rsqrte","","Reciprocal square-root estimate: `~= 1. 
/ self.sqrt()`.",6,[[]]],[11,"sin","","Sine.",6,[[]]],[11,"sin_pi","","Sine of `self * PI`.",6,[[]]],[11,"sin_cos_pi","","Sine and cosine of `self * PI`.",6,[[]]],[11,"sqrt","","",6,[[]]],[11,"sqrte","","Square-root estimate.",6,[[]]],[11,"tanh","","Tanh.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m32x16",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m32x16",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m32x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m32x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m32x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m32x16",6]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m32x16",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m32x16",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m32x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m32x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m32x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m32x16",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["m32x16",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["m32x16",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i64x8",6]],["i64x8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i64x8",6]],["i64x8",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element 
value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m64x8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m64x8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m64x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m64x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m64x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m64x8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["i64x8",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["i64x8",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u64x8",6]],["u64x8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u64x8",6]],["u64x8",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector 
elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m64x8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m64x8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m64x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m64x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m64x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m64x8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["u64x8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["u64x8",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"sum","","Horizontal sum of the vector elements.",6,[[]]],[11,"product","","Horizontal product of the vector 
elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[18,"EPSILON","","Machine epsilon value.",6,null],[18,"MIN","","Smallest finite value.",6,null],[18,"MIN_POSITIVE","","Smallest positive normal value.",6,null],[18,"MAX","","Largest finite value.",6,null],[18,"NAN","","Not a Number (NaN).",6,null],[18,"INFINITY","","Infinity (∞).",6,null],[18,"NEG_INFINITY","","Negative infinity (-∞).",6,null],[18,"PI","","Archimedes\' constant (π)",6,null],[18,"FRAC_PI_2","","π/2",6,null],[18,"FRAC_PI_3","","π/3",6,null],[18,"FRAC_PI_4","","π/4",6,null],[18,"FRAC_PI_6","","π/6",6,null],[18,"FRAC_PI_8","","π/8",6,null],[18,"FRAC_1_PI","","1/π",6,null],[18,"FRAC_2_PI","","2/π",6,null],[18,"FRAC_2_SQRT_PI","","2/sqrt(π)",6,null],[18,"SQRT_2","","sqrt(2)",6,null],[18,"FRAC_1_SQRT_2","","1/sqrt(2)",6,null],[18,"E","","Euler\'s number (e)",6,null],[18,"LOG2_E","","log2(e)",6,null],[18,"LOG10_E","","log10(e)",6,null],[18,"LN_2","","ln(2)",6,null],[18,"LN_10","","ln(10)",6,null],[11,"is_nan","","",6,[[],["m64x8",6]]],[11,"is_infinite","","",6,[[],["m64x8",6]]],[11,"is_finite","","",6,[[],["m64x8",6]]],[11,"abs","","Absolute value.",6,[[]]],[11,"cos","","Cosine.",6,[[]]],[11,"cos_pi","","Cosine of `self * PI`.",6,[[]]],[11,"exp","","Returns the exponential function of `self`: `e^(self)`.",6,[[]]],[11,"ln","","Returns the natural logarithm of `self`.",6,[[]]],[11,"mul_add","","Fused multiply add: `self * y + z`",6,[[]]],[11,"mul_adde","","Fused multiply add estimate: ~= `self * y + z`",6,[[]]],[11,"powf","","Raises `self` number to the floating point power of `x`.",6,[[]]],[11,"recpre","","Reciprocal estimate: `~= 1. / self`.",6,[[]]],[11,"rsqrte","","Reciprocal square-root estimate: `~= 1. 
/ self.sqrt()`.",6,[[]]],[11,"sin","","Sine.",6,[[]]],[11,"sin_pi","","Sine of `self * PI`.",6,[[]]],[11,"sin_cos_pi","","Sine and cosine of `self * PI`.",6,[[]]],[11,"sqrt","","",6,[[]]],[11,"sqrte","","Square-root estimate.",6,[[]]],[11,"tanh","","Tanh.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m64x8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m64x8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m64x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m64x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m64x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m64x8",6]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m64x8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m64x8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m64x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m64x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m64x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m64x8",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["m64x8",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["m64x8",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["i128x4",6]],["i128x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["i128x4",6]],["i128x4",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element 
value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m128x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m128x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m128x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m128x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m128x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m128x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["i128x4",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["i128x4",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["u128x4",6]],["u128x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["u128x4",6]],["u128x4",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the 
vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m128x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m128x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m128x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m128x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m128x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m128x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["u128x4",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["u128x4",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector 
elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["m128x4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["m128x4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["m128x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["m128x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["m128x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["m128x4",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["m128x4",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["m128x4",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["isizex2",6]],["isizex2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["isizex2",6]],["isizex2",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the 
target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["msizex2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["msizex2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["msizex2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["msizex2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["msizex2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["msizex2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["isizex2",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["isizex2",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["usizex2",6]],["usizex2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["usizex2",6]],["usizex2",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the 
target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["msizex2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["msizex2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["msizex2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["msizex2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["msizex2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["msizex2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["usizex2",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["usizex2",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["msizex2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["msizex2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["msizex2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["msizex2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["msizex2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["msizex2",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["msizex2",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["msizex2",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at 
`index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["isizex4",6]],["isizex4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["isizex4",6]],["isizex4",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["msizex4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["msizex4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["msizex4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["msizex4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["msizex4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["msizex4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["isizex4",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["isizex4",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector 
elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["usizex4",6]],["usizex4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["usizex4",6]],["usizex4",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["msizex4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["msizex4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["msizex4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["msizex4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["msizex4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["msizex4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["usizex4",6]]]],[11,"lex_ord","","Returns a wrapper that implements 
`Ord`.",6,[[],[["lexicographicallyordered",3],["usizex4",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["msizex4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["msizex4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["msizex4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["msizex4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["msizex4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["msizex4",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["msizex4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["msizex4",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["isizex8",6]],["isizex8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["isizex8",6]],["isizex8",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new 
vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["msizex8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["msizex8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["msizex8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["msizex8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["msizex8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["msizex8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["isizex8",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["isizex8",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",6,[[["usizex8",6]],["usizex8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",6,[[["usizex8",6]],["usizex8",6]]],[11,"min","","Minimum of two vectors.",6,[[]]],[11,"max","","Maximum of two vectors.",6,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",6,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",6,[[]]],[11,"max_element","","Largest vector element value.",6,[[]]],[11,"min_element","","Smallest vector element value.",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the 
`slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",6,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",6,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",6,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",6,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",6,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",6,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",6,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",6,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",6,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["msizex8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["msizex8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["msizex8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["msizex8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["msizex8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["msizex8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["usizex8",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["usizex8",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",6,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",6,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",6,[[]]],[11,"all","","Are `all` vector lanes `true`?",6,[[]]],[11,"any","","Is `any` vector lane `true`?",6,[[]]],[11,"none","","Are `all` vector lanes `false`?",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["msizex8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["msizex8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["msizex8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["msizex8",6]]],[11,"gt","","Lane-wise greater-than 
comparison.",6,[[],["msizex8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["msizex8",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",6,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",6,[[],[["lexicographicallyordered",3],["msizex8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",6,[[],[["lexicographicallyordered",3],["msizex8",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",6,[[]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"null","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"is_null","","Returns a mask that selects those lanes that contain…",6,[[],["msizex2",6]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["msizex2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["msizex2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["msizex2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["msizex2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["msizex2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["msizex2",6]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"offset","","Calculates the offset from a pointer.",6,[[["isizex2",6]]]],[11,"wrapping_offset","","Calculates the offset from a pointer using wrapping…",6,[[["isizex2",6]]]],[11,"offset_from","","Calculates the distance between two pointers.",6,[[],["isizex2",6]]],[11,"wrapping_offset_from","","Calculates the distance between two pointers.",6,[[],["isizex2",6]]],[11,"add","","Calculates the offset from a pointer (convenience for…",6,[[["usizex2",6]]]],[11,"sub","","Calculates the offset from a pointer (convenience for…",6,[[["usizex2",6]]]],[11,"wrapping_add","","Calculates the offset from a pointer using wrapping…",6,[[["usizex2",6]]]],[11,"wrapping_sub","","Calculates the offset from a pointer using wrapping…",6,[[["usizex2",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"read","","Reads selected vector elements from memory.",6,[[["simd",3],["simd",3]],["simd",3]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns 
the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"null","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"is_null","","Returns a mask that selects those lanes that contain…",6,[[],["msizex2",6]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["msizex2",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["msizex2",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["msizex2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["msizex2",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["msizex2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["msizex2",6]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"offset","","Calculates the offset from a pointer.",6,[[["isizex2",6]]]],[11,"wrapping_offset","","Calculates the offset from a pointer using wrapping…",6,[[["isizex2",6]]]],[11,"offset_from","","Calculates the distance between two pointers.",6,[[],["isizex2",6]]],[11,"wrapping_offset_from","","Calculates the distance between two pointers.",6,[[],["isizex2",6]]],[11,"add","","Calculates the offset from a pointer (convenience for…",6,[[["usizex2",6]]]],[11,"sub","","Calculates the offset from a pointer (convenience for…",6,[[["usizex2",6]]]],[11,"wrapping_add","","Calculates the offset from a pointer using wrapping…",6,[[["usizex2",6]]]],[11,"wrapping_sub","","Calculates the offset from a pointer using wrapping…",6,[[["usizex2",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"read","","Reads selected vector elements from memory.",6,[[["simd",3],["simd",3]],["simd",3]]],[11,"write","","Writes selected vector elements to memory.",6,[[["simd",3],["simd",3]]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"null","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"is_null","","Returns a mask that selects those lanes that contain…",6,[[],["msizex4",6]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"eq","","Lane-wise 
equality comparison.",6,[[],["msizex4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["msizex4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["msizex4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["msizex4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["msizex4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["msizex4",6]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"offset","","Calculates the offset from a pointer.",6,[[["isizex4",6]]]],[11,"wrapping_offset","","Calculates the offset from a pointer using wrapping…",6,[[["isizex4",6]]]],[11,"offset_from","","Calculates the distance between two pointers.",6,[[],["isizex4",6]]],[11,"wrapping_offset_from","","Calculates the distance between two pointers.",6,[[],["isizex4",6]]],[11,"add","","Calculates the offset from a pointer (convenience for…",6,[[["usizex4",6]]]],[11,"sub","","Calculates the offset from a pointer (convenience for…",6,[[["usizex4",6]]]],[11,"wrapping_add","","Calculates the offset from a pointer using wrapping…",6,[[["usizex4",6]]]],[11,"wrapping_sub","","Calculates the offset from a pointer using wrapping…",6,[[["usizex4",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"read","","Reads selected vector elements from memory.",6,[[["simd",3],["simd",3]],["simd",3]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"null","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"is_null","","Returns a mask that selects those lanes that contain…",6,[[],["msizex4",6]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["msizex4",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["msizex4",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["msizex4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["msizex4",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["msizex4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["msizex4",6]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the 
`slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"offset","","Calculates the offset from a pointer.",6,[[["isizex4",6]]]],[11,"wrapping_offset","","Calculates the offset from a pointer using wrapping…",6,[[["isizex4",6]]]],[11,"offset_from","","Calculates the distance between two pointers.",6,[[],["isizex4",6]]],[11,"wrapping_offset_from","","Calculates the distance between two pointers.",6,[[],["isizex4",6]]],[11,"add","","Calculates the offset from a pointer (convenience for…",6,[[["usizex4",6]]]],[11,"sub","","Calculates the offset from a pointer (convenience for…",6,[[["usizex4",6]]]],[11,"wrapping_add","","Calculates the offset from a pointer using wrapping…",6,[[["usizex4",6]]]],[11,"wrapping_sub","","Calculates the offset from a pointer using wrapping…",6,[[["usizex4",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"read","","Reads selected vector elements from memory.",6,[[["simd",3],["simd",3]],["simd",3]]],[11,"write","","Writes selected vector elements to memory.",6,[[["simd",3],["simd",3]]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"null","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"is_null","","Returns a mask that selects those lanes that contain…",6,[[],["msizex8",6]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["msizex8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["msizex8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["msizex8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["msizex8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["msizex8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["msizex8",6]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"offset","","Calculates the offset from a 
pointer.",6,[[["isizex8",6]]]],[11,"wrapping_offset","","Calculates the offset from a pointer using wrapping…",6,[[["isizex8",6]]]],[11,"offset_from","","Calculates the distance between two pointers.",6,[[],["isizex8",6]]],[11,"wrapping_offset_from","","Calculates the distance between two pointers.",6,[[],["isizex8",6]]],[11,"add","","Calculates the offset from a pointer (convenience for…",6,[[["usizex8",6]]]],[11,"sub","","Calculates the offset from a pointer (convenience for…",6,[[["usizex8",6]]]],[11,"wrapping_add","","Calculates the offset from a pointer using wrapping…",6,[[["usizex8",6]]]],[11,"wrapping_sub","","Calculates the offset from a pointer using wrapping…",6,[[["usizex8",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"read","","Reads selected vector elements from memory.",6,[[["simd",3],["simd",3]],["simd",3]]],[11,"new","","Creates a new instance with each vector elements…",6,[[]]],[11,"lanes","","Returns the number of vector lanes.",6,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"null","","Constructs a new instance with each element initialized to…",6,[[]]],[11,"is_null","","Returns a mask that selects those lanes that contain…",6,[[],["msizex8",6]]],[11,"extract","","Extracts the value at `index`.",6,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",6,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",6,[[]]],[11,"eq","","Lane-wise equality comparison.",6,[[],["msizex8",6]]],[11,"ne","","Lane-wise inequality comparison.",6,[[],["msizex8",6]]],[11,"lt","","Lane-wise less-than comparison.",6,[[],["msizex8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",6,[[],["msizex8",6]]],[11,"gt","","Lane-wise greater-than comparison.",6,[[],["msizex8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",6,[[],["msizex8",6]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",6,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",6,[[]]],[11,"offset","","Calculates the offset from a pointer.",6,[[["isizex8",6]]]],[11,"wrapping_offset","","Calculates the offset from a pointer using wrapping…",6,[[["isizex8",6]]]],[11,"offset_from","","Calculates the distance between two pointers.",6,[[],["isizex8",6]]],[11,"wrapping_offset_from","","Calculates the distance between two pointers.",6,[[],["isizex8",6]]],[11,"add","","Calculates the offset from a pointer (convenience for…",6,[[["usizex8",6]]]],[11,"sub","","Calculates the offset from a pointer (convenience for…",6,[[["usizex8",6]]]],[11,"wrapping_add","","Calculates the offset from a pointer using wrapping…",6,[[["usizex8",6]]]],[11,"wrapping_sub","","Calculates the offset from a pointer using 
wrapping…",6,[[["usizex8",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",6,[[]]],[11,"read","","Reads selected vector elements from memory.",6,[[["simd",3],["simd",3]],["simd",3]]],[11,"write","","Writes selected vector elements to memory.",6,[[["simd",3],["simd",3]]]],[6,"i8x2","","A 16-bit vector with 2 `i8` lanes.",null,null],[6,"u8x2","","A 16-bit vector with 2 `u8` lanes.",null,null],[6,"m8x2","","A 16-bit vector mask with 2 `m8` lanes.",null,null],[6,"i8x4","","A 32-bit vector with 4 `i8` lanes.",null,null],[6,"u8x4","","A 32-bit vector with 4 `u8` lanes.",null,null],[6,"m8x4","","A 32-bit vector mask with 4 `m8` lanes.",null,null],[6,"i16x2","","A 32-bit vector with 2 `i16` lanes.",null,null],[6,"u16x2","","A 32-bit vector with 2 `u16` lanes.",null,null],[6,"m16x2","","A 32-bit vector mask with 2 `m16` lanes.",null,null],[6,"i8x8","","A 64-bit vector with 8 `i8` lanes.",null,null],[6,"u8x8","","A 64-bit vector with 8 `u8` lanes.",null,null],[6,"m8x8","","A 64-bit vector mask with 8 `m8` lanes.",null,null],[6,"i16x4","","A 64-bit vector with 4 `i16` lanes.",null,null],[6,"u16x4","","A 64-bit vector with 4 `u16` lanes.",null,null],[6,"m16x4","","A 64-bit vector mask with 4 `m16` lanes.",null,null],[6,"i32x2","","A 64-bit vector with 2 `i32` lanes.",null,null],[6,"u32x2","","A 64-bit vector with 2 `u32` lanes.",null,null],[6,"m32x2","","A 64-bit vector mask with 2 `m32` lanes.",null,null],[6,"f32x2","","A 64-bit vector with 2 `f32` lanes.",null,null],[6,"i8x16","","A 128-bit vector with 16 `i8` lanes.",null,null],[6,"u8x16","","A 128-bit vector with 16 `u8` lanes.",null,null],[6,"m8x16","","A 128-bit vector mask with 16 `m8` lanes.",null,null],[6,"i16x8","","A 128-bit vector with 8 `i16` lanes.",null,null],[6,"u16x8","","A 128-bit vector with 8 `u16` lanes.",null,null],[6,"m16x8","","A 128-bit vector mask with 8 `m16` lanes.",null,null],[6,"i32x4","","A 128-bit vector with 4 `i32` lanes.",null,null],[6,"u32x4","","A 128-bit vector with 4 `u32` lanes.",null,null],[6,"f32x4","","A 128-bit vector with 4 `f32` lanes.",null,null],[6,"m32x4","","A 128-bit vector mask with 4 `m32` lanes.",null,null],[6,"i64x2","","A 128-bit vector with 2 `i64` lanes.",null,null],[6,"u64x2","","A 128-bit vector with 2 `u64` lanes.",null,null],[6,"f64x2","","A 128-bit vector with 2 `f64` lanes.",null,null],[6,"m64x2","","A 128-bit vector mask with 2 `m64` lanes.",null,null],[6,"i128x1","","A 128-bit vector with 1 `i128` lane.",null,null],[6,"u128x1","","A 128-bit vector with 1 `u128` lane.",null,null],[6,"m128x1","","A 128-bit vector mask with 1 `m128` lane.",null,null],[6,"i8x32","","A 256-bit vector with 32 `i8` lanes.",null,null],[6,"u8x32","","A 256-bit vector with 32 `u8` lanes.",null,null],[6,"m8x32","","A 256-bit vector mask with 32 `m8` lanes.",null,null],[6,"i16x16","","A 256-bit vector with 16 `i16` lanes.",null,null],[6,"u16x16","","A 256-bit vector with 16 `u16` lanes.",null,null],[6,"m16x16","","A 256-bit vector mask with 16 `m16` lanes.",null,null],[6,"i32x8","","A 256-bit vector with 8 `i32` lanes.",null,null],[6,"u32x8","","A 256-bit vector with 8 `u32` lanes.",null,null],[6,"f32x8","","A 256-bit vector with 8 `f32` lanes.",null,null],[6,"m32x8","","A 256-bit vector mask with 8 `m32` lanes.",null,null],[6,"i64x4","","A 256-bit vector with 4 `i64` lanes.",null,null],[6,"u64x4","","A 256-bit vector with 4 `u64` lanes.",null,null],[6,"f64x4","","A 256-bit vector with 4 `f64` lanes.",null,null],[6,"m64x4","","A 256-bit vector mask with 4 `m64` 
lanes.",null,null],[6,"i128x2","","A 256-bit vector with 2 `i128` lanes.",null,null],[6,"u128x2","","A 256-bit vector with 2 `u128` lanes.",null,null],[6,"m128x2","","A 256-bit vector mask with 2 `m128` lanes.",null,null],[6,"i8x64","","A 512-bit vector with 64 `i8` lanes.",null,null],[6,"u8x64","","A 512-bit vector with 64 `u8` lanes.",null,null],[6,"m8x64","","A 512-bit vector mask with 64 `m8` lanes.",null,null],[6,"i16x32","","A 512-bit vector with 32 `i16` lanes.",null,null],[6,"u16x32","","A 512-bit vector with 32 `u16` lanes.",null,null],[6,"m16x32","","A 512-bit vector mask with 32 `m16` lanes.",null,null],[6,"i32x16","","A 512-bit vector with 16 `i32` lanes.",null,null],[6,"u32x16","","A 512-bit vector with 16 `u32` lanes.",null,null],[6,"f32x16","","A 512-bit vector with 16 `f32` lanes.",null,null],[6,"m32x16","","A 512-bit vector mask with 16 `m32` lanes.",null,null],[6,"i64x8","","A 512-bit vector with 8 `i64` lanes.",null,null],[6,"u64x8","","A 512-bit vector with 8 `u64` lanes.",null,null],[6,"f64x8","","A 512-bit vector with 8 `f64` lanes.",null,null],[6,"m64x8","","A 512-bit vector mask with 8 `m64` lanes.",null,null],[6,"i128x4","","A 512-bit vector with 4 `i128` lanes.",null,null],[6,"u128x4","","A 512-bit vector with 4 `u128` lanes.",null,null],[6,"m128x4","","A 512-bit vector mask with 4 `m128` lanes.",null,null],[6,"isizex2","","A vector with 2 `isize` lanes.",null,null],[6,"usizex2","","A vector with 2 `usize` lanes.",null,null],[6,"msizex2","","A vector mask with 2 `msize` lanes.",null,null],[6,"isizex4","","A vector with 4 `isize` lanes.",null,null],[6,"usizex4","","A vector with 4 `usize` lanes.",null,null],[6,"msizex4","","A vector mask with 4 `msize` lanes.",null,null],[6,"isizex8","","A vector with 4 `isize` lanes.",null,null],[6,"usizex8","","A vector with 8 `usize` lanes.",null,null],[6,"msizex8","","A vector mask with 8 `msize` lanes.",null,null],[6,"cptrx2","","A vector with 2 `*const T` lanes",null,null],[6,"mptrx2","","A vector with 2 `*mut T` lanes",null,null],[6,"cptrx4","","A vector with 4 `*const T` lanes",null,null],[6,"mptrx4","","A vector with 4 `*mut T` lanes",null,null],[6,"cptrx8","","A vector with 8 `*const T` lanes",null,null],[6,"mptrx8","","A vector with 8 `*mut T` lanes",null,null],[8,"SimdVector","","This trait is implemented by all SIMD vector types.",null,null],[16,"Element","","Element type of the SIMD vector",7,null],[18,"LANES","","The number of elements in the SIMD vector.",7,null],[16,"LanesType","","The type: `[u32; Self::N]`.",7,null],[8,"SimdArray","","Trait implemented by arrays that can be SIMD types.",null,null],[16,"Tuple","","The type of the #[repr(simd)] type.",8,null],[16,"T","","The element type of the vector.",8,null],[18,"N","","The number of elements in the array.",8,null],[16,"NT","","The type: `[u32; Self::N]`.",8,null],[8,"Mask","","This trait is implemented by all mask types",null,null],[10,"test","","",9,[[]]],[8,"FromCast","","Numeric cast from `T` to `Self`.",null,null],[10,"from_cast","","Numeric cast from `T` to `Self`.",10,[[]]],[8,"Cast","","Numeric cast from `Self` to `T`.",null,null],[10,"cast","","Numeric cast from `self` to `T`.",11,[[]]],[8,"FromBits","","Safe lossless bitwise conversion from `T` to `Self`.",null,null],[10,"from_bits","","Safe lossless bitwise transmute from `T` to `Self`.",12,[[]]],[8,"IntoBits","","Safe lossless bitwise conversion from `Self` to `T`.",null,null],[10,"into_bits","","Safe lossless bitwise transmute from `self` to `T`.",13,[[]]],[14,"shuffle","","Shuffles vector 
elements.",null,null],[11,"from","","",6,[[]]],[11,"try_from","","",6,[[],["result",4]]],[11,"into","","",6,[[]]],[11,"try_into","","",6,[[],["result",4]]],[11,"borrow","","",6,[[]]],[11,"borrow_mut","","",6,[[]]],[11,"type_id","","",6,[[],["typeid",3]]],[11,"from","","",14,[[]]],[11,"try_from","","",14,[[],["result",4]]],[11,"into","","",14,[[]]],[11,"try_into","","",14,[[],["result",4]]],[11,"borrow","","",14,[[]]],[11,"borrow_mut","","",14,[[]]],[11,"type_id","","",14,[[],["typeid",3]]],[11,"from","","",0,[[]]],[11,"try_from","","",0,[[],["result",4]]],[11,"into","","",0,[[]]],[11,"try_into","","",0,[[],["result",4]]],[11,"borrow","","",0,[[]]],[11,"borrow_mut","","",0,[[]]],[11,"type_id","","",0,[[],["typeid",3]]],[11,"from","","",1,[[]]],[11,"try_from","","",1,[[],["result",4]]],[11,"into","","",1,[[]]],[11,"try_into","","",1,[[],["result",4]]],[11,"borrow","","",1,[[]]],[11,"borrow_mut","","",1,[[]]],[11,"type_id","","",1,[[],["typeid",3]]],[11,"from","","",2,[[]]],[11,"try_from","","",2,[[],["result",4]]],[11,"into","","",2,[[]]],[11,"try_into","","",2,[[],["result",4]]],[11,"borrow","","",2,[[]]],[11,"borrow_mut","","",2,[[]]],[11,"type_id","","",2,[[],["typeid",3]]],[11,"from","","",3,[[]]],[11,"try_from","","",3,[[],["result",4]]],[11,"into","","",3,[[]]],[11,"try_into","","",3,[[],["result",4]]],[11,"borrow","","",3,[[]]],[11,"borrow_mut","","",3,[[]]],[11,"type_id","","",3,[[],["typeid",3]]],[11,"from","","",4,[[]]],[11,"try_from","","",4,[[],["result",4]]],[11,"into","","",4,[[]]],[11,"try_into","","",4,[[],["result",4]]],[11,"borrow","","",4,[[]]],[11,"borrow_mut","","",4,[[]]],[11,"type_id","","",4,[[],["typeid",3]]],[11,"from","","",5,[[]]],[11,"try_from","","",5,[[],["result",4]]],[11,"into","","",5,[[]]],[11,"try_into","","",5,[[],["result",4]]],[11,"borrow","","",5,[[]]],[11,"borrow_mut","","",5,[[]]],[11,"type_id","","",5,[[],["typeid",3]]],[11,"from_cast","","",15,[[["u8x2",6]]]],[11,"from_cast","","",15,[[["m8x2",6]]]],[11,"from_cast","","",15,[[["i16x2",6]]]],[11,"from_cast","","",15,[[["u16x2",6]]]],[11,"from_cast","","",15,[[["m16x2",6]]]],[11,"from_cast","","",15,[[["i32x2",6]]]],[11,"from_cast","","",15,[[["u32x2",6]]]],[11,"from_cast","","",15,[[["f32x2",6]]]],[11,"from_cast","","",15,[[["m32x2",6]]]],[11,"from_cast","","",15,[[["i64x2",6]]]],[11,"from_cast","","",15,[[["u64x2",6]]]],[11,"from_cast","","",15,[[["f64x2",6]]]],[11,"from_cast","","",15,[[["m64x2",6]]]],[11,"from_cast","","",15,[[["i128x2",6]]]],[11,"from_cast","","",15,[[["u128x2",6]]]],[11,"from_cast","","",15,[[["m128x2",6]]]],[11,"from_cast","","",15,[[["isizex2",6]]]],[11,"from_cast","","",15,[[["usizex2",6]]]],[11,"from_cast","","",15,[[["msizex2",6]]]],[11,"from_cast","","",16,[[["i8x2",6]]]],[11,"from_cast","","",16,[[["m8x2",6]]]],[11,"from_cast","","",16,[[["i16x2",6]]]],[11,"from_cast","","",16,[[["u16x2",6]]]],[11,"from_cast","","",16,[[["m16x2",6]]]],[11,"from_cast","","",16,[[["i32x2",6]]]],[11,"from_cast","","",16,[[["u32x2",6]]]],[11,"from_cast","","",16,[[["f32x2",6]]]],[11,"from_cast","","",16,[[["m32x2",6]]]],[11,"from_cast","","",16,[[["i64x2",6]]]],[11,"from_cast","","",16,[[["u64x2",6]]]],[11,"from_cast","","",16,[[["f64x2",6]]]],[11,"from_cast","","",16,[[["m64x2",6]]]],[11,"from_cast","","",16,[[["i128x2",6]]]],[11,"from_cast","","",16,[[["u128x2",6]]]],[11,"from_cast","","",16,[[["m128x2",6]]]],[11,"from_cast","","",16,[[["isizex2",6]]]],[11,"from_cast","","",16,[[["usizex2",6]]]],[11,"from_cast","","",16,[[["msizex2",6]]]],[11,"from_cast","","",17,[[["i8x2",6]]]],[11,"from_c
ast","","",17,[[["u8x2",6]]]],[11,"from_cast","","",17,[[["i16x2",6]]]],[11,"from_cast","","",17,[[["u16x2",6]]]],[11,"from_cast","","",17,[[["m16x2",6]]]],[11,"from_cast","","",17,[[["i32x2",6]]]],[11,"from_cast","","",17,[[["u32x2",6]]]],[11,"from_cast","","",17,[[["f32x2",6]]]],[11,"from_cast","","",17,[[["m32x2",6]]]],[11,"from_cast","","",17,[[["i64x2",6]]]],[11,"from_cast","","",17,[[["u64x2",6]]]],[11,"from_cast","","",17,[[["f64x2",6]]]],[11,"from_cast","","",17,[[["m64x2",6]]]],[11,"from_cast","","",17,[[["i128x2",6]]]],[11,"from_cast","","",17,[[["u128x2",6]]]],[11,"from_cast","","",17,[[["m128x2",6]]]],[11,"from_cast","","",17,[[["isizex2",6]]]],[11,"from_cast","","",17,[[["usizex2",6]]]],[11,"from_cast","","",17,[[["msizex2",6]]]],[11,"from_cast","","",18,[[["u8x4",6]]]],[11,"from_cast","","",18,[[["m8x4",6]]]],[11,"from_cast","","",18,[[["i16x4",6]]]],[11,"from_cast","","",18,[[["u16x4",6]]]],[11,"from_cast","","",18,[[["m16x4",6]]]],[11,"from_cast","","",18,[[["i32x4",6]]]],[11,"from_cast","","",18,[[["u32x4",6]]]],[11,"from_cast","","",18,[[["f32x4",6]]]],[11,"from_cast","","",18,[[["m32x4",6]]]],[11,"from_cast","","",18,[[["i64x4",6]]]],[11,"from_cast","","",18,[[["u64x4",6]]]],[11,"from_cast","","",18,[[["f64x4",6]]]],[11,"from_cast","","",18,[[["m64x4",6]]]],[11,"from_cast","","",18,[[["i128x4",6]]]],[11,"from_cast","","",18,[[["u128x4",6]]]],[11,"from_cast","","",18,[[["m128x4",6]]]],[11,"from_cast","","",18,[[["isizex4",6]]]],[11,"from_cast","","",18,[[["usizex4",6]]]],[11,"from_cast","","",18,[[["msizex4",6]]]],[11,"from_cast","","",19,[[["i8x4",6]]]],[11,"from_cast","","",19,[[["m8x4",6]]]],[11,"from_cast","","",19,[[["i16x4",6]]]],[11,"from_cast","","",19,[[["u16x4",6]]]],[11,"from_cast","","",19,[[["m16x4",6]]]],[11,"from_cast","","",19,[[["i32x4",6]]]],[11,"from_cast","","",19,[[["u32x4",6]]]],[11,"from_cast","","",19,[[["f32x4",6]]]],[11,"from_cast","","",19,[[["m32x4",6]]]],[11,"from_cast","","",19,[[["i64x4",6]]]],[11,"from_cast","","",19,[[["u64x4",6]]]],[11,"from_cast","","",19,[[["f64x4",6]]]],[11,"from_cast","","",19,[[["m64x4",6]]]],[11,"from_cast","","",19,[[["i128x4",6]]]],[11,"from_cast","","",19,[[["u128x4",6]]]],[11,"from_cast","","",19,[[["m128x4",6]]]],[11,"from_cast","","",19,[[["isizex4",6]]]],[11,"from_cast","","",19,[[["usizex4",6]]]],[11,"from_cast","","",19,[[["msizex4",6]]]],[11,"from_cast","","",20,[[["i8x4",6]]]],[11,"from_cast","","",20,[[["u8x4",6]]]],[11,"from_cast","","",20,[[["i16x4",6]]]],[11,"from_cast","","",20,[[["u16x4",6]]]],[11,"from_cast","","",20,[[["m16x4",6]]]],[11,"from_cast","","",20,[[["i32x4",6]]]],[11,"from_cast","","",20,[[["u32x4",6]]]],[11,"from_cast","","",20,[[["f32x4",6]]]],[11,"from_cast","","",20,[[["m32x4",6]]]],[11,"from_cast","","",20,[[["i64x4",6]]]],[11,"from_cast","","",20,[[["u64x4",6]]]],[11,"from_cast","","",20,[[["f64x4",6]]]],[11,"from_cast","","",20,[[["m64x4",6]]]],[11,"from_cast","","",20,[[["i128x4",6]]]],[11,"from_cast","","",20,[[["u128x4",6]]]],[11,"from_cast","","",20,[[["m128x4",6]]]],[11,"from_cast","","",20,[[["isizex4",6]]]],[11,"from_cast","","",20,[[["usizex4",6]]]],[11,"from_cast","","",20,[[["msizex4",6]]]],[11,"from_cast","","",21,[[["i8x2",6]]]],[11,"from_cast","","",21,[[["u8x2",6]]]],[11,"from_cast","","",21,[[["m8x2",6]]]],[11,"from_cast","","",21,[[["u16x2",6]]]],[11,"from_cast","","",21,[[["m16x2",6]]]],[11,"from_cast","","",21,[[["i32x2",6]]]],[11,"from_cast","","",21,[[["u32x2",6]]]],[11,"from_cast","","",21,[[["f32x2",6]]]],[11,"from_cast","","",21,[[["m32x2",6]]]],[11,"from_ca
st","","",21,[[["i64x2",6]]]],[11,"from_cast","","",21,[[["u64x2",6]]]],[11,"from_cast","","",21,[[["f64x2",6]]]],[11,"from_cast","","",21,[[["m64x2",6]]]],[11,"from_cast","","",21,[[["i128x2",6]]]],[11,"from_cast","","",21,[[["u128x2",6]]]],[11,"from_cast","","",21,[[["m128x2",6]]]],[11,"from_cast","","",21,[[["isizex2",6]]]],[11,"from_cast","","",21,[[["usizex2",6]]]],[11,"from_cast","","",21,[[["msizex2",6]]]],[11,"from_cast","","",22,[[["i8x2",6]]]],[11,"from_cast","","",22,[[["u8x2",6]]]],[11,"from_cast","","",22,[[["m8x2",6]]]],[11,"from_cast","","",22,[[["i16x2",6]]]],[11,"from_cast","","",22,[[["m16x2",6]]]],[11,"from_cast","","",22,[[["i32x2",6]]]],[11,"from_cast","","",22,[[["u32x2",6]]]],[11,"from_cast","","",22,[[["f32x2",6]]]],[11,"from_cast","","",22,[[["m32x2",6]]]],[11,"from_cast","","",22,[[["i64x2",6]]]],[11,"from_cast","","",22,[[["u64x2",6]]]],[11,"from_cast","","",22,[[["f64x2",6]]]],[11,"from_cast","","",22,[[["m64x2",6]]]],[11,"from_cast","","",22,[[["i128x2",6]]]],[11,"from_cast","","",22,[[["u128x2",6]]]],[11,"from_cast","","",22,[[["m128x2",6]]]],[11,"from_cast","","",22,[[["isizex2",6]]]],[11,"from_cast","","",22,[[["usizex2",6]]]],[11,"from_cast","","",22,[[["msizex2",6]]]],[11,"from_cast","","",23,[[["i8x2",6]]]],[11,"from_cast","","",23,[[["u8x2",6]]]],[11,"from_cast","","",23,[[["m8x2",6]]]],[11,"from_cast","","",23,[[["i16x2",6]]]],[11,"from_cast","","",23,[[["u16x2",6]]]],[11,"from_cast","","",23,[[["i32x2",6]]]],[11,"from_cast","","",23,[[["u32x2",6]]]],[11,"from_cast","","",23,[[["f32x2",6]]]],[11,"from_cast","","",23,[[["m32x2",6]]]],[11,"from_cast","","",23,[[["i64x2",6]]]],[11,"from_cast","","",23,[[["u64x2",6]]]],[11,"from_cast","","",23,[[["f64x2",6]]]],[11,"from_cast","","",23,[[["m64x2",6]]]],[11,"from_cast","","",23,[[["i128x2",6]]]],[11,"from_cast","","",23,[[["u128x2",6]]]],[11,"from_cast","","",23,[[["m128x2",6]]]],[11,"from_cast","","",23,[[["isizex2",6]]]],[11,"from_cast","","",23,[[["usizex2",6]]]],[11,"from_cast","","",23,[[["msizex2",6]]]],[11,"from_cast","","",24,[[["u8x8",6]]]],[11,"from_cast","","",24,[[["m8x8",6]]]],[11,"from_cast","","",24,[[["i16x8",6]]]],[11,"from_cast","","",24,[[["u16x8",6]]]],[11,"from_cast","","",24,[[["m16x8",6]]]],[11,"from_cast","","",24,[[["i32x8",6]]]],[11,"from_cast","","",24,[[["u32x8",6]]]],[11,"from_cast","","",24,[[["f32x8",6]]]],[11,"from_cast","","",24,[[["m32x8",6]]]],[11,"from_cast","","",24,[[["i64x8",6]]]],[11,"from_cast","","",24,[[["u64x8",6]]]],[11,"from_cast","","",24,[[["f64x8",6]]]],[11,"from_cast","","",24,[[["m64x8",6]]]],[11,"from_cast","","",24,[[["isizex8",6]]]],[11,"from_cast","","",24,[[["usizex8",6]]]],[11,"from_cast","","",24,[[["msizex8",6]]]],[11,"from_cast","","",25,[[["i8x8",6]]]],[11,"from_cast","","",25,[[["m8x8",6]]]],[11,"from_cast","","",25,[[["i16x8",6]]]],[11,"from_cast","","",25,[[["u16x8",6]]]],[11,"from_cast","","",25,[[["m16x8",6]]]],[11,"from_cast","","",25,[[["i32x8",6]]]],[11,"from_cast","","",25,[[["u32x8",6]]]],[11,"from_cast","","",25,[[["f32x8",6]]]],[11,"from_cast","","",25,[[["m32x8",6]]]],[11,"from_cast","","",25,[[["i64x8",6]]]],[11,"from_cast","","",25,[[["u64x8",6]]]],[11,"from_cast","","",25,[[["f64x8",6]]]],[11,"from_cast","","",25,[[["m64x8",6]]]],[11,"from_cast","","",25,[[["isizex8",6]]]],[11,"from_cast","","",25,[[["usizex8",6]]]],[11,"from_cast","","",25,[[["msizex8",6]]]],[11,"from_cast","","",26,[[["i8x8",6]]]],[11,"from_cast","","",26,[[["u8x8",6]]]],[11,"from_cast","","",26,[[["i16x8",6]]]],[11,"from_cast","","",26,[[["u16x8",6]]]],[11,"from_ca
st","","",26,[[["m16x8",6]]]],[11,"from_cast","","",26,[[["i32x8",6]]]],[11,"from_cast","","",26,[[["u32x8",6]]]],[11,"from_cast","","",26,[[["f32x8",6]]]],[11,"from_cast","","",26,[[["m32x8",6]]]],[11,"from_cast","","",26,[[["i64x8",6]]]],[11,"from_cast","","",26,[[["u64x8",6]]]],[11,"from_cast","","",26,[[["f64x8",6]]]],[11,"from_cast","","",26,[[["m64x8",6]]]],[11,"from_cast","","",26,[[["isizex8",6]]]],[11,"from_cast","","",26,[[["usizex8",6]]]],[11,"from_cast","","",26,[[["msizex8",6]]]],[11,"from_cast","","",27,[[["i8x4",6]]]],[11,"from_cast","","",27,[[["u8x4",6]]]],[11,"from_cast","","",27,[[["m8x4",6]]]],[11,"from_cast","","",27,[[["u16x4",6]]]],[11,"from_cast","","",27,[[["m16x4",6]]]],[11,"from_cast","","",27,[[["i32x4",6]]]],[11,"from_cast","","",27,[[["u32x4",6]]]],[11,"from_cast","","",27,[[["f32x4",6]]]],[11,"from_cast","","",27,[[["m32x4",6]]]],[11,"from_cast","","",27,[[["i64x4",6]]]],[11,"from_cast","","",27,[[["u64x4",6]]]],[11,"from_cast","","",27,[[["f64x4",6]]]],[11,"from_cast","","",27,[[["m64x4",6]]]],[11,"from_cast","","",27,[[["i128x4",6]]]],[11,"from_cast","","",27,[[["u128x4",6]]]],[11,"from_cast","","",27,[[["m128x4",6]]]],[11,"from_cast","","",27,[[["isizex4",6]]]],[11,"from_cast","","",27,[[["usizex4",6]]]],[11,"from_cast","","",27,[[["msizex4",6]]]],[11,"from_cast","","",28,[[["i8x4",6]]]],[11,"from_cast","","",28,[[["u8x4",6]]]],[11,"from_cast","","",28,[[["m8x4",6]]]],[11,"from_cast","","",28,[[["i16x4",6]]]],[11,"from_cast","","",28,[[["m16x4",6]]]],[11,"from_cast","","",28,[[["i32x4",6]]]],[11,"from_cast","","",28,[[["u32x4",6]]]],[11,"from_cast","","",28,[[["f32x4",6]]]],[11,"from_cast","","",28,[[["m32x4",6]]]],[11,"from_cast","","",28,[[["i64x4",6]]]],[11,"from_cast","","",28,[[["u64x4",6]]]],[11,"from_cast","","",28,[[["f64x4",6]]]],[11,"from_cast","","",28,[[["m64x4",6]]]],[11,"from_cast","","",28,[[["i128x4",6]]]],[11,"from_cast","","",28,[[["u128x4",6]]]],[11,"from_cast","","",28,[[["m128x4",6]]]],[11,"from_cast","","",28,[[["isizex4",6]]]],[11,"from_cast","","",28,[[["usizex4",6]]]],[11,"from_cast","","",28,[[["msizex4",6]]]],[11,"from_cast","","",29,[[["i8x4",6]]]],[11,"from_cast","","",29,[[["u8x4",6]]]],[11,"from_cast","","",29,[[["m8x4",6]]]],[11,"from_cast","","",29,[[["i16x4",6]]]],[11,"from_cast","","",29,[[["u16x4",6]]]],[11,"from_cast","","",29,[[["i32x4",6]]]],[11,"from_cast","","",29,[[["u32x4",6]]]],[11,"from_cast","","",29,[[["f32x4",6]]]],[11,"from_cast","","",29,[[["m32x4",6]]]],[11,"from_cast","","",29,[[["i64x4",6]]]],[11,"from_cast","","",29,[[["u64x4",6]]]],[11,"from_cast","","",29,[[["f64x4",6]]]],[11,"from_cast","","",29,[[["m64x4",6]]]],[11,"from_cast","","",29,[[["i128x4",6]]]],[11,"from_cast","","",29,[[["u128x4",6]]]],[11,"from_cast","","",29,[[["m128x4",6]]]],[11,"from_cast","","",29,[[["isizex4",6]]]],[11,"from_cast","","",29,[[["usizex4",6]]]],[11,"from_cast","","",29,[[["msizex4",6]]]],[11,"from_cast","","",30,[[["i8x2",6]]]],[11,"from_cast","","",30,[[["u8x2",6]]]],[11,"from_cast","","",30,[[["m8x2",6]]]],[11,"from_cast","","",30,[[["i16x2",6]]]],[11,"from_cast","","",30,[[["u16x2",6]]]],[11,"from_cast","","",30,[[["m16x2",6]]]],[11,"from_cast","","",30,[[["u32x2",6]]]],[11,"from_cast","","",30,[[["f32x2",6]]]],[11,"from_cast","","",30,[[["m32x2",6]]]],[11,"from_cast","","",30,[[["i64x2",6]]]],[11,"from_cast","","",30,[[["u64x2",6]]]],[11,"from_cast","","",30,[[["f64x2",6]]]],[11,"from_cast","","",30,[[["m64x2",6]]]],[11,"from_cast","","",30,[[["i128x2",6]]]],[11,"from_cast","","",30,[[["u128x2",6]]]],[11,"from_cast",
"","",30,[[["m128x2",6]]]],[11,"from_cast","","",30,[[["isizex2",6]]]],[11,"from_cast","","",30,[[["usizex2",6]]]],[11,"from_cast","","",30,[[["msizex2",6]]]],[11,"from_cast","","",31,[[["i8x2",6]]]],[11,"from_cast","","",31,[[["u8x2",6]]]],[11,"from_cast","","",31,[[["m8x2",6]]]],[11,"from_cast","","",31,[[["i16x2",6]]]],[11,"from_cast","","",31,[[["u16x2",6]]]],[11,"from_cast","","",31,[[["m16x2",6]]]],[11,"from_cast","","",31,[[["i32x2",6]]]],[11,"from_cast","","",31,[[["f32x2",6]]]],[11,"from_cast","","",31,[[["m32x2",6]]]],[11,"from_cast","","",31,[[["i64x2",6]]]],[11,"from_cast","","",31,[[["u64x2",6]]]],[11,"from_cast","","",31,[[["f64x2",6]]]],[11,"from_cast","","",31,[[["m64x2",6]]]],[11,"from_cast","","",31,[[["i128x2",6]]]],[11,"from_cast","","",31,[[["u128x2",6]]]],[11,"from_cast","","",31,[[["m128x2",6]]]],[11,"from_cast","","",31,[[["isizex2",6]]]],[11,"from_cast","","",31,[[["usizex2",6]]]],[11,"from_cast","","",31,[[["msizex2",6]]]],[11,"from_cast","","",32,[[["i8x2",6]]]],[11,"from_cast","","",32,[[["u8x2",6]]]],[11,"from_cast","","",32,[[["m8x2",6]]]],[11,"from_cast","","",32,[[["i16x2",6]]]],[11,"from_cast","","",32,[[["u16x2",6]]]],[11,"from_cast","","",32,[[["m16x2",6]]]],[11,"from_cast","","",32,[[["i32x2",6]]]],[11,"from_cast","","",32,[[["u32x2",6]]]],[11,"from_cast","","",32,[[["m32x2",6]]]],[11,"from_cast","","",32,[[["i64x2",6]]]],[11,"from_cast","","",32,[[["u64x2",6]]]],[11,"from_cast","","",32,[[["f64x2",6]]]],[11,"from_cast","","",32,[[["m64x2",6]]]],[11,"from_cast","","",32,[[["i128x2",6]]]],[11,"from_cast","","",32,[[["u128x2",6]]]],[11,"from_cast","","",32,[[["m128x2",6]]]],[11,"from_cast","","",32,[[["isizex2",6]]]],[11,"from_cast","","",32,[[["usizex2",6]]]],[11,"from_cast","","",32,[[["msizex2",6]]]],[11,"from_cast","","",33,[[["i8x2",6]]]],[11,"from_cast","","",33,[[["u8x2",6]]]],[11,"from_cast","","",33,[[["m8x2",6]]]],[11,"from_cast","","",33,[[["i16x2",6]]]],[11,"from_cast","","",33,[[["u16x2",6]]]],[11,"from_cast","","",33,[[["m16x2",6]]]],[11,"from_cast","","",33,[[["i32x2",6]]]],[11,"from_cast","","",33,[[["u32x2",6]]]],[11,"from_cast","","",33,[[["f32x2",6]]]],[11,"from_cast","","",33,[[["i64x2",6]]]],[11,"from_cast","","",33,[[["u64x2",6]]]],[11,"from_cast","","",33,[[["f64x2",6]]]],[11,"from_cast","","",33,[[["m64x2",6]]]],[11,"from_cast","","",33,[[["i128x2",6]]]],[11,"from_cast","","",33,[[["u128x2",6]]]],[11,"from_cast","","",33,[[["m128x2",6]]]],[11,"from_cast","","",33,[[["isizex2",6]]]],[11,"from_cast","","",33,[[["usizex2",6]]]],[11,"from_cast","","",33,[[["msizex2",6]]]],[11,"from_cast","","",34,[[["u8x16",6]]]],[11,"from_cast","","",34,[[["m8x16",6]]]],[11,"from_cast","","",34,[[["i16x16",6]]]],[11,"from_cast","","",34,[[["u16x16",6]]]],[11,"from_cast","","",34,[[["m16x16",6]]]],[11,"from_cast","","",34,[[["i32x16",6]]]],[11,"from_cast","","",34,[[["u32x16",6]]]],[11,"from_cast","","",34,[[["f32x16",6]]]],[11,"from_cast","","",34,[[["m32x16",6]]]],[11,"from_cast","","",35,[[["i8x16",6]]]],[11,"from_cast","","",35,[[["m8x16",6]]]],[11,"from_cast","","",35,[[["i16x16",6]]]],[11,"from_cast","","",35,[[["u16x16",6]]]],[11,"from_cast","","",35,[[["m16x16",6]]]],[11,"from_cast","","",35,[[["i32x16",6]]]],[11,"from_cast","","",35,[[["u32x16",6]]]],[11,"from_cast","","",35,[[["f32x16",6]]]],[11,"from_cast","","",35,[[["m32x16",6]]]],[11,"from_cast","","",36,[[["i8x16",6]]]],[11,"from_cast","","",36,[[["u8x16",6]]]],[11,"from_cast","","",36,[[["i16x16",6]]]],[11,"from_cast","","",36,[[["u16x16",6]]]],[11,"from_cast","","",36,[[["m16x16",6]]]],[
11,"from_cast","","",36,[[["i32x16",6]]]],[11,"from_cast","","",36,[[["u32x16",6]]]],[11,"from_cast","","",36,[[["f32x16",6]]]],[11,"from_cast","","",36,[[["m32x16",6]]]],[11,"from_cast","","",37,[[["i8x8",6]]]],[11,"from_cast","","",37,[[["u8x8",6]]]],[11,"from_cast","","",37,[[["m8x8",6]]]],[11,"from_cast","","",37,[[["u16x8",6]]]],[11,"from_cast","","",37,[[["m16x8",6]]]],[11,"from_cast","","",37,[[["i32x8",6]]]],[11,"from_cast","","",37,[[["u32x8",6]]]],[11,"from_cast","","",37,[[["f32x8",6]]]],[11,"from_cast","","",37,[[["m32x8",6]]]],[11,"from_cast","","",37,[[["i64x8",6]]]],[11,"from_cast","","",37,[[["u64x8",6]]]],[11,"from_cast","","",37,[[["f64x8",6]]]],[11,"from_cast","","",37,[[["m64x8",6]]]],[11,"from_cast","","",37,[[["isizex8",6]]]],[11,"from_cast","","",37,[[["usizex8",6]]]],[11,"from_cast","","",37,[[["msizex8",6]]]],[11,"from_cast","","",38,[[["i8x8",6]]]],[11,"from_cast","","",38,[[["u8x8",6]]]],[11,"from_cast","","",38,[[["m8x8",6]]]],[11,"from_cast","","",38,[[["i16x8",6]]]],[11,"from_cast","","",38,[[["m16x8",6]]]],[11,"from_cast","","",38,[[["i32x8",6]]]],[11,"from_cast","","",38,[[["u32x8",6]]]],[11,"from_cast","","",38,[[["f32x8",6]]]],[11,"from_cast","","",38,[[["m32x8",6]]]],[11,"from_cast","","",38,[[["i64x8",6]]]],[11,"from_cast","","",38,[[["u64x8",6]]]],[11,"from_cast","","",38,[[["f64x8",6]]]],[11,"from_cast","","",38,[[["m64x8",6]]]],[11,"from_cast","","",38,[[["isizex8",6]]]],[11,"from_cast","","",38,[[["usizex8",6]]]],[11,"from_cast","","",38,[[["msizex8",6]]]],[11,"from_cast","","",39,[[["i8x8",6]]]],[11,"from_cast","","",39,[[["u8x8",6]]]],[11,"from_cast","","",39,[[["m8x8",6]]]],[11,"from_cast","","",39,[[["i16x8",6]]]],[11,"from_cast","","",39,[[["u16x8",6]]]],[11,"from_cast","","",39,[[["i32x8",6]]]],[11,"from_cast","","",39,[[["u32x8",6]]]],[11,"from_cast","","",39,[[["f32x8",6]]]],[11,"from_cast","","",39,[[["m32x8",6]]]],[11,"from_cast","","",39,[[["i64x8",6]]]],[11,"from_cast","","",39,[[["u64x8",6]]]],[11,"from_cast","","",39,[[["f64x8",6]]]],[11,"from_cast","","",39,[[["m64x8",6]]]],[11,"from_cast","","",39,[[["isizex8",6]]]],[11,"from_cast","","",39,[[["usizex8",6]]]],[11,"from_cast","","",39,[[["msizex8",6]]]],[11,"from_cast","","",40,[[["i8x4",6]]]],[11,"from_cast","","",40,[[["u8x4",6]]]],[11,"from_cast","","",40,[[["m8x4",6]]]],[11,"from_cast","","",40,[[["i16x4",6]]]],[11,"from_cast","","",40,[[["u16x4",6]]]],[11,"from_cast","","",40,[[["m16x4",6]]]],[11,"from_cast","","",40,[[["u32x4",6]]]],[11,"from_cast","","",40,[[["f32x4",6]]]],[11,"from_cast","","",40,[[["m32x4",6]]]],[11,"from_cast","","",40,[[["i64x4",6]]]],[11,"from_cast","","",40,[[["u64x4",6]]]],[11,"from_cast","","",40,[[["f64x4",6]]]],[11,"from_cast","","",40,[[["m64x4",6]]]],[11,"from_cast","","",40,[[["i128x4",6]]]],[11,"from_cast","","",40,[[["u128x4",6]]]],[11,"from_cast","","",40,[[["m128x4",6]]]],[11,"from_cast","","",40,[[["isizex4",6]]]],[11,"from_cast","","",40,[[["usizex4",6]]]],[11,"from_cast","","",40,[[["msizex4",6]]]],[11,"from_cast","","",41,[[["i8x4",6]]]],[11,"from_cast","","",41,[[["u8x4",6]]]],[11,"from_cast","","",41,[[["m8x4",6]]]],[11,"from_cast","","",41,[[["i16x4",6]]]],[11,"from_cast","","",41,[[["u16x4",6]]]],[11,"from_cast","","",41,[[["m16x4",6]]]],[11,"from_cast","","",41,[[["i32x4",6]]]],[11,"from_cast","","",41,[[["f32x4",6]]]],[11,"from_cast","","",41,[[["m32x4",6]]]],[11,"from_cast","","",41,[[["i64x4",6]]]],[11,"from_cast","","",41,[[["u64x4",6]]]],[11,"from_cast","","",41,[[["f64x4",6]]]],[11,"from_cast","","",41,[[["m64x4",6]]]],[11,"from_ca
st","","",41,[[["i128x4",6]]]],[11,"from_cast","","",41,[[["u128x4",6]]]],[11,"from_cast","","",41,[[["m128x4",6]]]],[11,"from_cast","","",41,[[["isizex4",6]]]],[11,"from_cast","","",41,[[["usizex4",6]]]],[11,"from_cast","","",41,[[["msizex4",6]]]],[11,"from_cast","","",42,[[["i8x4",6]]]],[11,"from_cast","","",42,[[["u8x4",6]]]],[11,"from_cast","","",42,[[["m8x4",6]]]],[11,"from_cast","","",42,[[["i16x4",6]]]],[11,"from_cast","","",42,[[["u16x4",6]]]],[11,"from_cast","","",42,[[["m16x4",6]]]],[11,"from_cast","","",42,[[["i32x4",6]]]],[11,"from_cast","","",42,[[["u32x4",6]]]],[11,"from_cast","","",42,[[["m32x4",6]]]],[11,"from_cast","","",42,[[["i64x4",6]]]],[11,"from_cast","","",42,[[["u64x4",6]]]],[11,"from_cast","","",42,[[["f64x4",6]]]],[11,"from_cast","","",42,[[["m64x4",6]]]],[11,"from_cast","","",42,[[["i128x4",6]]]],[11,"from_cast","","",42,[[["u128x4",6]]]],[11,"from_cast","","",42,[[["m128x4",6]]]],[11,"from_cast","","",42,[[["isizex4",6]]]],[11,"from_cast","","",42,[[["usizex4",6]]]],[11,"from_cast","","",42,[[["msizex4",6]]]],[11,"from_cast","","",43,[[["i8x4",6]]]],[11,"from_cast","","",43,[[["u8x4",6]]]],[11,"from_cast","","",43,[[["m8x4",6]]]],[11,"from_cast","","",43,[[["i16x4",6]]]],[11,"from_cast","","",43,[[["u16x4",6]]]],[11,"from_cast","","",43,[[["m16x4",6]]]],[11,"from_cast","","",43,[[["i32x4",6]]]],[11,"from_cast","","",43,[[["u32x4",6]]]],[11,"from_cast","","",43,[[["f32x4",6]]]],[11,"from_cast","","",43,[[["i64x4",6]]]],[11,"from_cast","","",43,[[["u64x4",6]]]],[11,"from_cast","","",43,[[["f64x4",6]]]],[11,"from_cast","","",43,[[["m64x4",6]]]],[11,"from_cast","","",43,[[["i128x4",6]]]],[11,"from_cast","","",43,[[["u128x4",6]]]],[11,"from_cast","","",43,[[["m128x4",6]]]],[11,"from_cast","","",43,[[["isizex4",6]]]],[11,"from_cast","","",43,[[["usizex4",6]]]],[11,"from_cast","","",43,[[["msizex4",6]]]],[11,"from_cast","","",44,[[["i8x2",6]]]],[11,"from_cast","","",44,[[["u8x2",6]]]],[11,"from_cast","","",44,[[["m8x2",6]]]],[11,"from_cast","","",44,[[["i16x2",6]]]],[11,"from_cast","","",44,[[["u16x2",6]]]],[11,"from_cast","","",44,[[["m16x2",6]]]],[11,"from_cast","","",44,[[["i32x2",6]]]],[11,"from_cast","","",44,[[["u32x2",6]]]],[11,"from_cast","","",44,[[["f32x2",6]]]],[11,"from_cast","","",44,[[["m32x2",6]]]],[11,"from_cast","","",44,[[["u64x2",6]]]],[11,"from_cast","","",44,[[["f64x2",6]]]],[11,"from_cast","","",44,[[["m64x2",6]]]],[11,"from_cast","","",44,[[["i128x2",6]]]],[11,"from_cast","","",44,[[["u128x2",6]]]],[11,"from_cast","","",44,[[["m128x2",6]]]],[11,"from_cast","","",44,[[["isizex2",6]]]],[11,"from_cast","","",44,[[["usizex2",6]]]],[11,"from_cast","","",44,[[["msizex2",6]]]],[11,"from_cast","","",45,[[["i8x2",6]]]],[11,"from_cast","","",45,[[["u8x2",6]]]],[11,"from_cast","","",45,[[["m8x2",6]]]],[11,"from_cast","","",45,[[["i16x2",6]]]],[11,"from_cast","","",45,[[["u16x2",6]]]],[11,"from_cast","","",45,[[["m16x2",6]]]],[11,"from_cast","","",45,[[["i32x2",6]]]],[11,"from_cast","","",45,[[["u32x2",6]]]],[11,"from_cast","","",45,[[["f32x2",6]]]],[11,"from_cast","","",45,[[["m32x2",6]]]],[11,"from_cast","","",45,[[["i64x2",6]]]],[11,"from_cast","","",45,[[["f64x2",6]]]],[11,"from_cast","","",45,[[["m64x2",6]]]],[11,"from_cast","","",45,[[["i128x2",6]]]],[11,"from_cast","","",45,[[["u128x2",6]]]],[11,"from_cast","","",45,[[["m128x2",6]]]],[11,"from_cast","","",45,[[["isizex2",6]]]],[11,"from_cast","","",45,[[["usizex2",6]]]],[11,"from_cast","","",45,[[["msizex2",6]]]],[11,"from_cast","","",46,[[["i8x2",6]]]],[11,"from_cast","","",46,[[["u8x2",6]]]],[11,"fro
m_cast","","",46,[[["m8x2",6]]]],[11,"from_cast","","",46,[[["i16x2",6]]]],[11,"from_cast","","",46,[[["u16x2",6]]]],[11,"from_cast","","",46,[[["m16x2",6]]]],[11,"from_cast","","",46,[[["i32x2",6]]]],[11,"from_cast","","",46,[[["u32x2",6]]]],[11,"from_cast","","",46,[[["f32x2",6]]]],[11,"from_cast","","",46,[[["m32x2",6]]]],[11,"from_cast","","",46,[[["i64x2",6]]]],[11,"from_cast","","",46,[[["u64x2",6]]]],[11,"from_cast","","",46,[[["m64x2",6]]]],[11,"from_cast","","",46,[[["i128x2",6]]]],[11,"from_cast","","",46,[[["u128x2",6]]]],[11,"from_cast","","",46,[[["m128x2",6]]]],[11,"from_cast","","",46,[[["isizex2",6]]]],[11,"from_cast","","",46,[[["usizex2",6]]]],[11,"from_cast","","",46,[[["msizex2",6]]]],[11,"from_cast","","",47,[[["i8x2",6]]]],[11,"from_cast","","",47,[[["u8x2",6]]]],[11,"from_cast","","",47,[[["m8x2",6]]]],[11,"from_cast","","",47,[[["i16x2",6]]]],[11,"from_cast","","",47,[[["u16x2",6]]]],[11,"from_cast","","",47,[[["m16x2",6]]]],[11,"from_cast","","",47,[[["i32x2",6]]]],[11,"from_cast","","",47,[[["u32x2",6]]]],[11,"from_cast","","",47,[[["f32x2",6]]]],[11,"from_cast","","",47,[[["m32x2",6]]]],[11,"from_cast","","",47,[[["i64x2",6]]]],[11,"from_cast","","",47,[[["u64x2",6]]]],[11,"from_cast","","",47,[[["f64x2",6]]]],[11,"from_cast","","",47,[[["i128x2",6]]]],[11,"from_cast","","",47,[[["u128x2",6]]]],[11,"from_cast","","",47,[[["m128x2",6]]]],[11,"from_cast","","",47,[[["isizex2",6]]]],[11,"from_cast","","",47,[[["usizex2",6]]]],[11,"from_cast","","",47,[[["msizex2",6]]]],[11,"from_cast","","",48,[[["i8x2",6]]]],[11,"from_cast","","",48,[[["u8x2",6]]]],[11,"from_cast","","",48,[[["m8x2",6]]]],[11,"from_cast","","",48,[[["i16x2",6]]]],[11,"from_cast","","",48,[[["u16x2",6]]]],[11,"from_cast","","",48,[[["m16x2",6]]]],[11,"from_cast","","",48,[[["i32x2",6]]]],[11,"from_cast","","",48,[[["u32x2",6]]]],[11,"from_cast","","",48,[[["f32x2",6]]]],[11,"from_cast","","",48,[[["m32x2",6]]]],[11,"from_cast","","",48,[[["i64x2",6]]]],[11,"from_cast","","",48,[[["u64x2",6]]]],[11,"from_cast","","",48,[[["f64x2",6]]]],[11,"from_cast","","",48,[[["m64x2",6]]]],[11,"from_cast","","",48,[[["i128x2",6]]]],[11,"from_cast","","",48,[[["u128x2",6]]]],[11,"from_cast","","",48,[[["m128x2",6]]]],[11,"from_cast","","",48,[[["usizex2",6]]]],[11,"from_cast","","",48,[[["msizex2",6]]]],[11,"from_cast","","",49,[[["i8x2",6]]]],[11,"from_cast","","",49,[[["u8x2",6]]]],[11,"from_cast","","",49,[[["m8x2",6]]]],[11,"from_cast","","",49,[[["i16x2",6]]]],[11,"from_cast","","",49,[[["u16x2",6]]]],[11,"from_cast","","",49,[[["m16x2",6]]]],[11,"from_cast","","",49,[[["i32x2",6]]]],[11,"from_cast","","",49,[[["u32x2",6]]]],[11,"from_cast","","",49,[[["f32x2",6]]]],[11,"from_cast","","",49,[[["m32x2",6]]]],[11,"from_cast","","",49,[[["i64x2",6]]]],[11,"from_cast","","",49,[[["u64x2",6]]]],[11,"from_cast","","",49,[[["f64x2",6]]]],[11,"from_cast","","",49,[[["m64x2",6]]]],[11,"from_cast","","",49,[[["i128x2",6]]]],[11,"from_cast","","",49,[[["u128x2",6]]]],[11,"from_cast","","",49,[[["m128x2",6]]]],[11,"from_cast","","",49,[[["isizex2",6]]]],[11,"from_cast","","",49,[[["msizex2",6]]]],[11,"from_cast","","",50,[[["i8x2",6]]]],[11,"from_cast","","",50,[[["u8x2",6]]]],[11,"from_cast","","",50,[[["m8x2",6]]]],[11,"from_cast","","",50,[[["i16x2",6]]]],[11,"from_cast","","",50,[[["u16x2",6]]]],[11,"from_cast","","",50,[[["m16x2",6]]]],[11,"from_cast","","",50,[[["i32x2",6]]]],[11,"from_cast","","",50,[[["u32x2",6]]]],[11,"from_cast","","",50,[[["f32x2",6]]]],[11,"from_cast","","",50,[[["m32x2",6]]]],[11,"from_cast",
"","",50,[[["i64x2",6]]]],[11,"from_cast","","",50,[[["u64x2",6]]]],[11,"from_cast","","",50,[[["f64x2",6]]]],[11,"from_cast","","",50,[[["m64x2",6]]]],[11,"from_cast","","",50,[[["i128x2",6]]]],[11,"from_cast","","",50,[[["u128x2",6]]]],[11,"from_cast","","",50,[[["m128x2",6]]]],[11,"from_cast","","",50,[[["isizex2",6]]]],[11,"from_cast","","",50,[[["usizex2",6]]]],[11,"from_cast","","",51,[[["u128x1",6]]]],[11,"from_cast","","",51,[[["m128x1",6]]]],[11,"from_cast","","",52,[[["i128x1",6]]]],[11,"from_cast","","",52,[[["m128x1",6]]]],[11,"from_cast","","",53,[[["i128x1",6]]]],[11,"from_cast","","",53,[[["u128x1",6]]]],[11,"from_cast","","",54,[[["u8x32",6]]]],[11,"from_cast","","",54,[[["m8x32",6]]]],[11,"from_cast","","",54,[[["i16x32",6]]]],[11,"from_cast","","",54,[[["u16x32",6]]]],[11,"from_cast","","",54,[[["m16x32",6]]]],[11,"from_cast","","",55,[[["i8x32",6]]]],[11,"from_cast","","",55,[[["m8x32",6]]]],[11,"from_cast","","",55,[[["i16x32",6]]]],[11,"from_cast","","",55,[[["u16x32",6]]]],[11,"from_cast","","",55,[[["m16x32",6]]]],[11,"from_cast","","",56,[[["i8x32",6]]]],[11,"from_cast","","",56,[[["u8x32",6]]]],[11,"from_cast","","",56,[[["i16x32",6]]]],[11,"from_cast","","",56,[[["u16x32",6]]]],[11,"from_cast","","",56,[[["m16x32",6]]]],[11,"from_cast","","",57,[[["i8x16",6]]]],[11,"from_cast","","",57,[[["u8x16",6]]]],[11,"from_cast","","",57,[[["m8x16",6]]]],[11,"from_cast","","",57,[[["u16x16",6]]]],[11,"from_cast","","",57,[[["m16x16",6]]]],[11,"from_cast","","",57,[[["i32x16",6]]]],[11,"from_cast","","",57,[[["u32x16",6]]]],[11,"from_cast","","",57,[[["f32x16",6]]]],[11,"from_cast","","",57,[[["m32x16",6]]]],[11,"from_cast","","",58,[[["i8x16",6]]]],[11,"from_cast","","",58,[[["u8x16",6]]]],[11,"from_cast","","",58,[[["m8x16",6]]]],[11,"from_cast","","",58,[[["i16x16",6]]]],[11,"from_cast","","",58,[[["m16x16",6]]]],[11,"from_cast","","",58,[[["i32x16",6]]]],[11,"from_cast","","",58,[[["u32x16",6]]]],[11,"from_cast","","",58,[[["f32x16",6]]]],[11,"from_cast","","",58,[[["m32x16",6]]]],[11,"from_cast","","",59,[[["i8x16",6]]]],[11,"from_cast","","",59,[[["u8x16",6]]]],[11,"from_cast","","",59,[[["m8x16",6]]]],[11,"from_cast","","",59,[[["i16x16",6]]]],[11,"from_cast","","",59,[[["u16x16",6]]]],[11,"from_cast","","",59,[[["i32x16",6]]]],[11,"from_cast","","",59,[[["u32x16",6]]]],[11,"from_cast","","",59,[[["f32x16",6]]]],[11,"from_cast","","",59,[[["m32x16",6]]]],[11,"from_cast","","",60,[[["i8x8",6]]]],[11,"from_cast","","",60,[[["u8x8",6]]]],[11,"from_cast","","",60,[[["m8x8",6]]]],[11,"from_cast","","",60,[[["i16x8",6]]]],[11,"from_cast","","",60,[[["u16x8",6]]]],[11,"from_cast","","",60,[[["m16x8",6]]]],[11,"from_cast","","",60,[[["u32x8",6]]]],[11,"from_cast","","",60,[[["f32x8",6]]]],[11,"from_cast","","",60,[[["m32x8",6]]]],[11,"from_cast","","",60,[[["i64x8",6]]]],[11,"from_cast","","",60,[[["u64x8",6]]]],[11,"from_cast","","",60,[[["f64x8",6]]]],[11,"from_cast","","",60,[[["m64x8",6]]]],[11,"from_cast","","",60,[[["isizex8",6]]]],[11,"from_cast","","",60,[[["usizex8",6]]]],[11,"from_cast","","",60,[[["msizex8",6]]]],[11,"from_cast","","",61,[[["i8x8",6]]]],[11,"from_cast","","",61,[[["u8x8",6]]]],[11,"from_cast","","",61,[[["m8x8",6]]]],[11,"from_cast","","",61,[[["i16x8",6]]]],[11,"from_cast","","",61,[[["u16x8",6]]]],[11,"from_cast","","",61,[[["m16x8",6]]]],[11,"from_cast","","",61,[[["i32x8",6]]]],[11,"from_cast","","",61,[[["f32x8",6]]]],[11,"from_cast","","",61,[[["m32x8",6]]]],[11,"from_cast","","",61,[[["i64x8",6]]]],[11,"from_cast","","",61,[[["u64x8",6]]]],[11
,"from_cast","","",61,[[["f64x8",6]]]],[11,"from_cast","","",61,[[["m64x8",6]]]],[11,"from_cast","","",61,[[["isizex8",6]]]],[11,"from_cast","","",61,[[["usizex8",6]]]],[11,"from_cast","","",61,[[["msizex8",6]]]],[11,"from_cast","","",62,[[["i8x8",6]]]],[11,"from_cast","","",62,[[["u8x8",6]]]],[11,"from_cast","","",62,[[["m8x8",6]]]],[11,"from_cast","","",62,[[["i16x8",6]]]],[11,"from_cast","","",62,[[["u16x8",6]]]],[11,"from_cast","","",62,[[["m16x8",6]]]],[11,"from_cast","","",62,[[["i32x8",6]]]],[11,"from_cast","","",62,[[["u32x8",6]]]],[11,"from_cast","","",62,[[["m32x8",6]]]],[11,"from_cast","","",62,[[["i64x8",6]]]],[11,"from_cast","","",62,[[["u64x8",6]]]],[11,"from_cast","","",62,[[["f64x8",6]]]],[11,"from_cast","","",62,[[["m64x8",6]]]],[11,"from_cast","","",62,[[["isizex8",6]]]],[11,"from_cast","","",62,[[["usizex8",6]]]],[11,"from_cast","","",62,[[["msizex8",6]]]],[11,"from_cast","","",63,[[["i8x8",6]]]],[11,"from_cast","","",63,[[["u8x8",6]]]],[11,"from_cast","","",63,[[["m8x8",6]]]],[11,"from_cast","","",63,[[["i16x8",6]]]],[11,"from_cast","","",63,[[["u16x8",6]]]],[11,"from_cast","","",63,[[["m16x8",6]]]],[11,"from_cast","","",63,[[["i32x8",6]]]],[11,"from_cast","","",63,[[["u32x8",6]]]],[11,"from_cast","","",63,[[["f32x8",6]]]],[11,"from_cast","","",63,[[["i64x8",6]]]],[11,"from_cast","","",63,[[["u64x8",6]]]],[11,"from_cast","","",63,[[["f64x8",6]]]],[11,"from_cast","","",63,[[["m64x8",6]]]],[11,"from_cast","","",63,[[["isizex8",6]]]],[11,"from_cast","","",63,[[["usizex8",6]]]],[11,"from_cast","","",63,[[["msizex8",6]]]],[11,"from_cast","","",64,[[["i8x4",6]]]],[11,"from_cast","","",64,[[["u8x4",6]]]],[11,"from_cast","","",64,[[["m8x4",6]]]],[11,"from_cast","","",64,[[["i16x4",6]]]],[11,"from_cast","","",64,[[["u16x4",6]]]],[11,"from_cast","","",64,[[["m16x4",6]]]],[11,"from_cast","","",64,[[["i32x4",6]]]],[11,"from_cast","","",64,[[["u32x4",6]]]],[11,"from_cast","","",64,[[["f32x4",6]]]],[11,"from_cast","","",64,[[["m32x4",6]]]],[11,"from_cast","","",64,[[["u64x4",6]]]],[11,"from_cast","","",64,[[["f64x4",6]]]],[11,"from_cast","","",64,[[["m64x4",6]]]],[11,"from_cast","","",64,[[["i128x4",6]]]],[11,"from_cast","","",64,[[["u128x4",6]]]],[11,"from_cast","","",64,[[["m128x4",6]]]],[11,"from_cast","","",64,[[["isizex4",6]]]],[11,"from_cast","","",64,[[["usizex4",6]]]],[11,"from_cast","","",64,[[["msizex4",6]]]],[11,"from_cast","","",65,[[["i8x4",6]]]],[11,"from_cast","","",65,[[["u8x4",6]]]],[11,"from_cast","","",65,[[["m8x4",6]]]],[11,"from_cast","","",65,[[["i16x4",6]]]],[11,"from_cast","","",65,[[["u16x4",6]]]],[11,"from_cast","","",65,[[["m16x4",6]]]],[11,"from_cast","","",65,[[["i32x4",6]]]],[11,"from_cast","","",65,[[["u32x4",6]]]],[11,"from_cast","","",65,[[["f32x4",6]]]],[11,"from_cast","","",65,[[["m32x4",6]]]],[11,"from_cast","","",65,[[["i64x4",6]]]],[11,"from_cast","","",65,[[["f64x4",6]]]],[11,"from_cast","","",65,[[["m64x4",6]]]],[11,"from_cast","","",65,[[["i128x4",6]]]],[11,"from_cast","","",65,[[["u128x4",6]]]],[11,"from_cast","","",65,[[["m128x4",6]]]],[11,"from_cast","","",65,[[["isizex4",6]]]],[11,"from_cast","","",65,[[["usizex4",6]]]],[11,"from_cast","","",65,[[["msizex4",6]]]],[11,"from_cast","","",66,[[["i8x4",6]]]],[11,"from_cast","","",66,[[["u8x4",6]]]],[11,"from_cast","","",66,[[["m8x4",6]]]],[11,"from_cast","","",66,[[["i16x4",6]]]],[11,"from_cast","","",66,[[["u16x4",6]]]],[11,"from_cast","","",66,[[["m16x4",6]]]],[11,"from_cast","","",66,[[["i32x4",6]]]],[11,"from_cast","","",66,[[["u32x4",6]]]],[11,"from_cast","","",66,[[["f32x4",6]]]],[11,"from
_cast","","",66,[[["m32x4",6]]]],[11,"from_cast","","",66,[[["i64x4",6]]]],[11,"from_cast","","",66,[[["u64x4",6]]]],[11,"from_cast","","",66,[[["m64x4",6]]]],[11,"from_cast","","",66,[[["i128x4",6]]]],[11,"from_cast","","",66,[[["u128x4",6]]]],[11,"from_cast","","",66,[[["m128x4",6]]]],[11,"from_cast","","",66,[[["isizex4",6]]]],[11,"from_cast","","",66,[[["usizex4",6]]]],[11,"from_cast","","",66,[[["msizex4",6]]]],[11,"from_cast","","",67,[[["i8x4",6]]]],[11,"from_cast","","",67,[[["u8x4",6]]]],[11,"from_cast","","",67,[[["m8x4",6]]]],[11,"from_cast","","",67,[[["i16x4",6]]]],[11,"from_cast","","",67,[[["u16x4",6]]]],[11,"from_cast","","",67,[[["m16x4",6]]]],[11,"from_cast","","",67,[[["i32x4",6]]]],[11,"from_cast","","",67,[[["u32x4",6]]]],[11,"from_cast","","",67,[[["f32x4",6]]]],[11,"from_cast","","",67,[[["m32x4",6]]]],[11,"from_cast","","",67,[[["i64x4",6]]]],[11,"from_cast","","",67,[[["u64x4",6]]]],[11,"from_cast","","",67,[[["f64x4",6]]]],[11,"from_cast","","",67,[[["i128x4",6]]]],[11,"from_cast","","",67,[[["u128x4",6]]]],[11,"from_cast","","",67,[[["m128x4",6]]]],[11,"from_cast","","",67,[[["isizex4",6]]]],[11,"from_cast","","",67,[[["usizex4",6]]]],[11,"from_cast","","",67,[[["msizex4",6]]]],[11,"from_cast","","",68,[[["i8x2",6]]]],[11,"from_cast","","",68,[[["u8x2",6]]]],[11,"from_cast","","",68,[[["m8x2",6]]]],[11,"from_cast","","",68,[[["i16x2",6]]]],[11,"from_cast","","",68,[[["u16x2",6]]]],[11,"from_cast","","",68,[[["m16x2",6]]]],[11,"from_cast","","",68,[[["i32x2",6]]]],[11,"from_cast","","",68,[[["u32x2",6]]]],[11,"from_cast","","",68,[[["f32x2",6]]]],[11,"from_cast","","",68,[[["m32x2",6]]]],[11,"from_cast","","",68,[[["i64x2",6]]]],[11,"from_cast","","",68,[[["u64x2",6]]]],[11,"from_cast","","",68,[[["f64x2",6]]]],[11,"from_cast","","",68,[[["m64x2",6]]]],[11,"from_cast","","",68,[[["u128x2",6]]]],[11,"from_cast","","",68,[[["m128x2",6]]]],[11,"from_cast","","",68,[[["isizex2",6]]]],[11,"from_cast","","",68,[[["usizex2",6]]]],[11,"from_cast","","",68,[[["msizex2",6]]]],[11,"from_cast","","",69,[[["i8x2",6]]]],[11,"from_cast","","",69,[[["u8x2",6]]]],[11,"from_cast","","",69,[[["m8x2",6]]]],[11,"from_cast","","",69,[[["i16x2",6]]]],[11,"from_cast","","",69,[[["u16x2",6]]]],[11,"from_cast","","",69,[[["m16x2",6]]]],[11,"from_cast","","",69,[[["i32x2",6]]]],[11,"from_cast","","",69,[[["u32x2",6]]]],[11,"from_cast","","",69,[[["f32x2",6]]]],[11,"from_cast","","",69,[[["m32x2",6]]]],[11,"from_cast","","",69,[[["i64x2",6]]]],[11,"from_cast","","",69,[[["u64x2",6]]]],[11,"from_cast","","",69,[[["f64x2",6]]]],[11,"from_cast","","",69,[[["m64x2",6]]]],[11,"from_cast","","",69,[[["i128x2",6]]]],[11,"from_cast","","",69,[[["m128x2",6]]]],[11,"from_cast","","",69,[[["isizex2",6]]]],[11,"from_cast","","",69,[[["usizex2",6]]]],[11,"from_cast","","",69,[[["msizex2",6]]]],[11,"from_cast","","",70,[[["i8x2",6]]]],[11,"from_cast","","",70,[[["u8x2",6]]]],[11,"from_cast","","",70,[[["m8x2",6]]]],[11,"from_cast","","",70,[[["i16x2",6]]]],[11,"from_cast","","",70,[[["u16x2",6]]]],[11,"from_cast","","",70,[[["m16x2",6]]]],[11,"from_cast","","",70,[[["i32x2",6]]]],[11,"from_cast","","",70,[[["u32x2",6]]]],[11,"from_cast","","",70,[[["f32x2",6]]]],[11,"from_cast","","",70,[[["m32x2",6]]]],[11,"from_cast","","",70,[[["i64x2",6]]]],[11,"from_cast","","",70,[[["u64x2",6]]]],[11,"from_cast","","",70,[[["m64x2",6]]]],[11,"from_cast","","",70,[[["f64x2",6]]]],[11,"from_cast","","",70,[[["i128x2",6]]]],[11,"from_cast","","",70,[[["u128x2",6]]]],[11,"from_cast","","",70,[[["isizex2",6]]]],[11,"from_
cast","","",70,[[["usizex2",6]]]],[11,"from_cast","","",70,[[["msizex2",6]]]],[11,"from_cast","","",71,[[["i8x4",6]]]],[11,"from_cast","","",71,[[["u8x4",6]]]],[11,"from_cast","","",71,[[["m8x4",6]]]],[11,"from_cast","","",71,[[["i16x4",6]]]],[11,"from_cast","","",71,[[["u16x4",6]]]],[11,"from_cast","","",71,[[["m16x4",6]]]],[11,"from_cast","","",71,[[["i32x4",6]]]],[11,"from_cast","","",71,[[["u32x4",6]]]],[11,"from_cast","","",71,[[["f32x4",6]]]],[11,"from_cast","","",71,[[["m32x4",6]]]],[11,"from_cast","","",71,[[["i64x4",6]]]],[11,"from_cast","","",71,[[["u64x4",6]]]],[11,"from_cast","","",71,[[["f64x4",6]]]],[11,"from_cast","","",71,[[["m64x4",6]]]],[11,"from_cast","","",71,[[["i128x4",6]]]],[11,"from_cast","","",71,[[["u128x4",6]]]],[11,"from_cast","","",71,[[["m128x4",6]]]],[11,"from_cast","","",71,[[["usizex4",6]]]],[11,"from_cast","","",71,[[["msizex4",6]]]],[11,"from_cast","","",72,[[["i8x4",6]]]],[11,"from_cast","","",72,[[["u8x4",6]]]],[11,"from_cast","","",72,[[["m8x4",6]]]],[11,"from_cast","","",72,[[["i16x4",6]]]],[11,"from_cast","","",72,[[["u16x4",6]]]],[11,"from_cast","","",72,[[["m16x4",6]]]],[11,"from_cast","","",72,[[["i32x4",6]]]],[11,"from_cast","","",72,[[["u32x4",6]]]],[11,"from_cast","","",72,[[["f32x4",6]]]],[11,"from_cast","","",72,[[["m32x4",6]]]],[11,"from_cast","","",72,[[["i64x4",6]]]],[11,"from_cast","","",72,[[["u64x4",6]]]],[11,"from_cast","","",72,[[["f64x4",6]]]],[11,"from_cast","","",72,[[["m64x4",6]]]],[11,"from_cast","","",72,[[["i128x4",6]]]],[11,"from_cast","","",72,[[["u128x4",6]]]],[11,"from_cast","","",72,[[["m128x4",6]]]],[11,"from_cast","","",72,[[["isizex4",6]]]],[11,"from_cast","","",72,[[["msizex4",6]]]],[11,"from_cast","","",73,[[["i8x4",6]]]],[11,"from_cast","","",73,[[["u8x4",6]]]],[11,"from_cast","","",73,[[["m8x4",6]]]],[11,"from_cast","","",73,[[["i16x4",6]]]],[11,"from_cast","","",73,[[["u16x4",6]]]],[11,"from_cast","","",73,[[["m16x4",6]]]],[11,"from_cast","","",73,[[["i32x4",6]]]],[11,"from_cast","","",73,[[["u32x4",6]]]],[11,"from_cast","","",73,[[["f32x4",6]]]],[11,"from_cast","","",73,[[["m32x4",6]]]],[11,"from_cast","","",73,[[["i64x4",6]]]],[11,"from_cast","","",73,[[["u64x4",6]]]],[11,"from_cast","","",73,[[["f64x4",6]]]],[11,"from_cast","","",73,[[["m64x4",6]]]],[11,"from_cast","","",73,[[["i128x4",6]]]],[11,"from_cast","","",73,[[["u128x4",6]]]],[11,"from_cast","","",73,[[["m128x4",6]]]],[11,"from_cast","","",73,[[["isizex4",6]]]],[11,"from_cast","","",73,[[["usizex4",6]]]],[11,"from_cast","","",74,[[["u8x64",6]]]],[11,"from_cast","","",74,[[["m8x64",6]]]],[11,"from_cast","","",75,[[["i8x64",6]]]],[11,"from_cast","","",75,[[["m8x64",6]]]],[11,"from_cast","","",76,[[["i8x64",6]]]],[11,"from_cast","","",76,[[["u8x64",6]]]],[11,"from_cast","","",77,[[["i8x32",6]]]],[11,"from_cast","","",77,[[["u8x32",6]]]],[11,"from_cast","","",77,[[["m8x32",6]]]],[11,"from_cast","","",77,[[["u16x32",6]]]],[11,"from_cast","","",77,[[["m16x32",6]]]],[11,"from_cast","","",78,[[["i8x32",6]]]],[11,"from_cast","","",78,[[["u8x32",6]]]],[11,"from_cast","","",78,[[["m8x32",6]]]],[11,"from_cast","","",78,[[["i16x32",6]]]],[11,"from_cast","","",78,[[["m16x32",6]]]],[11,"from_cast","","",79,[[["i8x32",6]]]],[11,"from_cast","","",79,[[["u8x32",6]]]],[11,"from_cast","","",79,[[["m8x32",6]]]],[11,"from_cast","","",79,[[["i16x32",6]]]],[11,"from_cast","","",79,[[["u16x32",6]]]],[11,"from_cast","","",80,[[["i8x16",6]]]],[11,"from_cast","","",80,[[["u8x16",6]]]],[11,"from_cast","","",80,[[["m8x16",6]]]],[11,"from_cast","","",80,[[["i16x16",6]]]],[11,"from_cast
","","",80,[[["u16x16",6]]]],[11,"from_cast","","",80,[[["m16x16",6]]]],[11,"from_cast","","",80,[[["u32x16",6]]]],[11,"from_cast","","",80,[[["f32x16",6]]]],[11,"from_cast","","",80,[[["m32x16",6]]]],[11,"from_cast","","",81,[[["i8x16",6]]]],[11,"from_cast","","",81,[[["u8x16",6]]]],[11,"from_cast","","",81,[[["m8x16",6]]]],[11,"from_cast","","",81,[[["i16x16",6]]]],[11,"from_cast","","",81,[[["u16x16",6]]]],[11,"from_cast","","",81,[[["m16x16",6]]]],[11,"from_cast","","",81,[[["i32x16",6]]]],[11,"from_cast","","",81,[[["f32x16",6]]]],[11,"from_cast","","",81,[[["m32x16",6]]]],[11,"from_cast","","",82,[[["i8x16",6]]]],[11,"from_cast","","",82,[[["u8x16",6]]]],[11,"from_cast","","",82,[[["m8x16",6]]]],[11,"from_cast","","",82,[[["i16x16",6]]]],[11,"from_cast","","",82,[[["u16x16",6]]]],[11,"from_cast","","",82,[[["m16x16",6]]]],[11,"from_cast","","",82,[[["i32x16",6]]]],[11,"from_cast","","",82,[[["u32x16",6]]]],[11,"from_cast","","",82,[[["m32x16",6]]]],[11,"from_cast","","",83,[[["i8x16",6]]]],[11,"from_cast","","",83,[[["u8x16",6]]]],[11,"from_cast","","",83,[[["m8x16",6]]]],[11,"from_cast","","",83,[[["i16x16",6]]]],[11,"from_cast","","",83,[[["u16x16",6]]]],[11,"from_cast","","",83,[[["m16x16",6]]]],[11,"from_cast","","",83,[[["i32x16",6]]]],[11,"from_cast","","",83,[[["u32x16",6]]]],[11,"from_cast","","",83,[[["f32x16",6]]]],[11,"from_cast","","",84,[[["i8x8",6]]]],[11,"from_cast","","",84,[[["u8x8",6]]]],[11,"from_cast","","",84,[[["m8x8",6]]]],[11,"from_cast","","",84,[[["i16x8",6]]]],[11,"from_cast","","",84,[[["u16x8",6]]]],[11,"from_cast","","",84,[[["m16x8",6]]]],[11,"from_cast","","",84,[[["i32x8",6]]]],[11,"from_cast","","",84,[[["u32x8",6]]]],[11,"from_cast","","",84,[[["f32x8",6]]]],[11,"from_cast","","",84,[[["m32x8",6]]]],[11,"from_cast","","",84,[[["u64x8",6]]]],[11,"from_cast","","",84,[[["f64x8",6]]]],[11,"from_cast","","",84,[[["m64x8",6]]]],[11,"from_cast","","",84,[[["isizex8",6]]]],[11,"from_cast","","",84,[[["usizex8",6]]]],[11,"from_cast","","",84,[[["msizex8",6]]]],[11,"from_cast","","",85,[[["i8x8",6]]]],[11,"from_cast","","",85,[[["u8x8",6]]]],[11,"from_cast","","",85,[[["m8x8",6]]]],[11,"from_cast","","",85,[[["i16x8",6]]]],[11,"from_cast","","",85,[[["u16x8",6]]]],[11,"from_cast","","",85,[[["m16x8",6]]]],[11,"from_cast","","",85,[[["i32x8",6]]]],[11,"from_cast","","",85,[[["u32x8",6]]]],[11,"from_cast","","",85,[[["f32x8",6]]]],[11,"from_cast","","",85,[[["m32x8",6]]]],[11,"from_cast","","",85,[[["i64x8",6]]]],[11,"from_cast","","",85,[[["f64x8",6]]]],[11,"from_cast","","",85,[[["m64x8",6]]]],[11,"from_cast","","",85,[[["isizex8",6]]]],[11,"from_cast","","",85,[[["usizex8",6]]]],[11,"from_cast","","",85,[[["msizex8",6]]]],[11,"from_cast","","",86,[[["i8x8",6]]]],[11,"from_cast","","",86,[[["u8x8",6]]]],[11,"from_cast","","",86,[[["m8x8",6]]]],[11,"from_cast","","",86,[[["i16x8",6]]]],[11,"from_cast","","",86,[[["u16x8",6]]]],[11,"from_cast","","",86,[[["m16x8",6]]]],[11,"from_cast","","",86,[[["i32x8",6]]]],[11,"from_cast","","",86,[[["u32x8",6]]]],[11,"from_cast","","",86,[[["f32x8",6]]]],[11,"from_cast","","",86,[[["m32x8",6]]]],[11,"from_cast","","",86,[[["i64x8",6]]]],[11,"from_cast","","",86,[[["u64x8",6]]]],[11,"from_cast","","",86,[[["m64x8",6]]]],[11,"from_cast","","",86,[[["isizex8",6]]]],[11,"from_cast","","",86,[[["usizex8",6]]]],[11,"from_cast","","",86,[[["msizex8",6]]]],[11,"from_cast","","",87,[[["i8x8",6]]]],[11,"from_cast","","",87,[[["u8x8",6]]]],[11,"from_cast","","",87,[[["m8x8",6]]]],[11,"from_cast","","",87,[[["i16x8",6]]]],[11,"from_ca
st","","",87,[[["u16x8",6]]]],[11,"from_cast","","",87,[[["m16x8",6]]]],[11,"from_cast","","",87,[[["i32x8",6]]]],[11,"from_cast","","",87,[[["u32x8",6]]]],[11,"from_cast","","",87,[[["f32x8",6]]]],[11,"from_cast","","",87,[[["m32x8",6]]]],[11,"from_cast","","",87,[[["i64x8",6]]]],[11,"from_cast","","",87,[[["u64x8",6]]]],[11,"from_cast","","",87,[[["f64x8",6]]]],[11,"from_cast","","",87,[[["isizex8",6]]]],[11,"from_cast","","",87,[[["usizex8",6]]]],[11,"from_cast","","",87,[[["msizex8",6]]]],[11,"from_cast","","",88,[[["i8x4",6]]]],[11,"from_cast","","",88,[[["u8x4",6]]]],[11,"from_cast","","",88,[[["m8x4",6]]]],[11,"from_cast","","",88,[[["i16x4",6]]]],[11,"from_cast","","",88,[[["u16x4",6]]]],[11,"from_cast","","",88,[[["m16x4",6]]]],[11,"from_cast","","",88,[[["i32x4",6]]]],[11,"from_cast","","",88,[[["u32x4",6]]]],[11,"from_cast","","",88,[[["f32x4",6]]]],[11,"from_cast","","",88,[[["m32x4",6]]]],[11,"from_cast","","",88,[[["i64x4",6]]]],[11,"from_cast","","",88,[[["u64x4",6]]]],[11,"from_cast","","",88,[[["f64x4",6]]]],[11,"from_cast","","",88,[[["m64x4",6]]]],[11,"from_cast","","",88,[[["u128x4",6]]]],[11,"from_cast","","",88,[[["m128x4",6]]]],[11,"from_cast","","",88,[[["isizex4",6]]]],[11,"from_cast","","",88,[[["usizex4",6]]]],[11,"from_cast","","",88,[[["msizex4",6]]]],[11,"from_cast","","",89,[[["i8x4",6]]]],[11,"from_cast","","",89,[[["u8x4",6]]]],[11,"from_cast","","",89,[[["m8x4",6]]]],[11,"from_cast","","",89,[[["i16x4",6]]]],[11,"from_cast","","",89,[[["u16x4",6]]]],[11,"from_cast","","",89,[[["m16x4",6]]]],[11,"from_cast","","",89,[[["i32x4",6]]]],[11,"from_cast","","",89,[[["u32x4",6]]]],[11,"from_cast","","",89,[[["f32x4",6]]]],[11,"from_cast","","",89,[[["m32x4",6]]]],[11,"from_cast","","",89,[[["i64x4",6]]]],[11,"from_cast","","",89,[[["u64x4",6]]]],[11,"from_cast","","",89,[[["f64x4",6]]]],[11,"from_cast","","",89,[[["m64x4",6]]]],[11,"from_cast","","",89,[[["i128x4",6]]]],[11,"from_cast","","",89,[[["m128x4",6]]]],[11,"from_cast","","",89,[[["isizex4",6]]]],[11,"from_cast","","",89,[[["usizex4",6]]]],[11,"from_cast","","",89,[[["msizex4",6]]]],[11,"from_cast","","",90,[[["i8x4",6]]]],[11,"from_cast","","",90,[[["u8x4",6]]]],[11,"from_cast","","",90,[[["m8x4",6]]]],[11,"from_cast","","",90,[[["i16x4",6]]]],[11,"from_cast","","",90,[[["u16x4",6]]]],[11,"from_cast","","",90,[[["m16x4",6]]]],[11,"from_cast","","",90,[[["i32x4",6]]]],[11,"from_cast","","",90,[[["u32x4",6]]]],[11,"from_cast","","",90,[[["f32x4",6]]]],[11,"from_cast","","",90,[[["m32x4",6]]]],[11,"from_cast","","",90,[[["i64x4",6]]]],[11,"from_cast","","",90,[[["u64x4",6]]]],[11,"from_cast","","",90,[[["m64x4",6]]]],[11,"from_cast","","",90,[[["f64x4",6]]]],[11,"from_cast","","",90,[[["i128x4",6]]]],[11,"from_cast","","",90,[[["u128x4",6]]]],[11,"from_cast","","",90,[[["isizex4",6]]]],[11,"from_cast","","",90,[[["usizex4",6]]]],[11,"from_cast","","",90,[[["msizex4",6]]]],[11,"from_cast","","",91,[[["i8x8",6]]]],[11,"from_cast","","",91,[[["u8x8",6]]]],[11,"from_cast","","",91,[[["m8x8",6]]]],[11,"from_cast","","",91,[[["i16x8",6]]]],[11,"from_cast","","",91,[[["u16x8",6]]]],[11,"from_cast","","",91,[[["m16x8",6]]]],[11,"from_cast","","",91,[[["i32x8",6]]]],[11,"from_cast","","",91,[[["u32x8",6]]]],[11,"from_cast","","",91,[[["f32x8",6]]]],[11,"from_cast","","",91,[[["m32x8",6]]]],[11,"from_cast","","",91,[[["i64x8",6]]]],[11,"from_cast","","",91,[[["u64x8",6]]]],[11,"from_cast","","",91,[[["f64x8",6]]]],[11,"from_cast","","",91,[[["m64x8",6]]]],[11,"from_cast","","",91,[[["usizex8",6]]]],[11,"from_cast","",
"",91,[[["msizex8",6]]]],[11,"from_cast","","",92,[[["i8x8",6]]]],[11,"from_cast","","",92,[[["u8x8",6]]]],[11,"from_cast","","",92,[[["m8x8",6]]]],[11,"from_cast","","",92,[[["i16x8",6]]]],[11,"from_cast","","",92,[[["u16x8",6]]]],[11,"from_cast","","",92,[[["m16x8",6]]]],[11,"from_cast","","",92,[[["i32x8",6]]]],[11,"from_cast","","",92,[[["u32x8",6]]]],[11,"from_cast","","",92,[[["f32x8",6]]]],[11,"from_cast","","",92,[[["m32x8",6]]]],[11,"from_cast","","",92,[[["i64x8",6]]]],[11,"from_cast","","",92,[[["u64x8",6]]]],[11,"from_cast","","",92,[[["f64x8",6]]]],[11,"from_cast","","",92,[[["m64x8",6]]]],[11,"from_cast","","",92,[[["isizex8",6]]]],[11,"from_cast","","",92,[[["msizex8",6]]]],[11,"from_cast","","",93,[[["i8x8",6]]]],[11,"from_cast","","",93,[[["u8x8",6]]]],[11,"from_cast","","",93,[[["m8x8",6]]]],[11,"from_cast","","",93,[[["i16x8",6]]]],[11,"from_cast","","",93,[[["u16x8",6]]]],[11,"from_cast","","",93,[[["m16x8",6]]]],[11,"from_cast","","",93,[[["i32x8",6]]]],[11,"from_cast","","",93,[[["u32x8",6]]]],[11,"from_cast","","",93,[[["f32x8",6]]]],[11,"from_cast","","",93,[[["m32x8",6]]]],[11,"from_cast","","",93,[[["i64x8",6]]]],[11,"from_cast","","",93,[[["u64x8",6]]]],[11,"from_cast","","",93,[[["f64x8",6]]]],[11,"from_cast","","",93,[[["m64x8",6]]]],[11,"from_cast","","",93,[[["isizex8",6]]]],[11,"from_cast","","",93,[[["usizex8",6]]]],[11,"from_bits","","",15,[[["u8x2",6]]]],[11,"from_bits","","",15,[[["m8x2",6]]]],[11,"from_bits","","",16,[[["i8x2",6]]]],[11,"from_bits","","",16,[[["m8x2",6]]]],[11,"from_bits","","",18,[[["u8x4",6]]]],[11,"from_bits","","",18,[[["m8x4",6]]]],[11,"from_bits","","",18,[[["i16x2",6]]]],[11,"from_bits","","",18,[[["u16x2",6]]]],[11,"from_bits","","",18,[[["m16x2",6]]]],[11,"from_bits","","",19,[[["i8x4",6]]]],[11,"from_bits","","",19,[[["m8x4",6]]]],[11,"from_bits","","",19,[[["i16x2",6]]]],[11,"from_bits","","",19,[[["u16x2",6]]]],[11,"from_bits","","",19,[[["m16x2",6]]]],[11,"from_bits","","",20,[[["m16x2",6]]]],[11,"from_bits","","",21,[[["i8x4",6]]]],[11,"from_bits","","",21,[[["u8x4",6]]]],[11,"from_bits","","",21,[[["m8x4",6]]]],[11,"from_bits","","",21,[[["u16x2",6]]]],[11,"from_bits","","",21,[[["m16x2",6]]]],[11,"from_bits","","",22,[[["i8x4",6]]]],[11,"from_bits","","",22,[[["u8x4",6]]]],[11,"from_bits","","",22,[[["m8x4",6]]]],[11,"from_bits","","",22,[[["i16x2",6]]]],[11,"from_bits","","",22,[[["m16x2",6]]]],[11,"from_bits","","",24,[[["u8x8",6]]]],[11,"from_bits","","",24,[[["m8x8",6]]]],[11,"from_bits","","",24,[[["i16x4",6]]]],[11,"from_bits","","",24,[[["u16x4",6]]]],[11,"from_bits","","",24,[[["m16x4",6]]]],[11,"from_bits","","",24,[[["i32x2",6]]]],[11,"from_bits","","",24,[[["u32x2",6]]]],[11,"from_bits","","",24,[[["f32x2",6]]]],[11,"from_bits","","",24,[[["m32x2",6]]]],[11,"from_bits","","",25,[[["i8x8",6]]]],[11,"from_bits","","",25,[[["m8x8",6]]]],[11,"from_bits","","",25,[[["i16x4",6]]]],[11,"from_bits","","",25,[[["u16x4",6]]]],[11,"from_bits","","",25,[[["m16x4",6]]]],[11,"from_bits","","",25,[[["i32x2",6]]]],[11,"from_bits","","",25,[[["u32x2",6]]]],[11,"from_bits","","",25,[[["f32x2",6]]]],[11,"from_bits","","",25,[[["m32x2",6]]]],[11,"from_bits","","",26,[[["m16x4",6]]]],[11,"from_bits","","",26,[[["m32x2",6]]]],[11,"from_bits","","",27,[[["i8x8",6]]]],[11,"from_bits","","",27,[[["u8x8",6]]]],[11,"from_bits","","",27,[[["m8x8",6]]]],[11,"from_bits","","",27,[[["u16x4",6]]]],[11,"from_bits","","",27,[[["m16x4",6]]]],[11,"from_bits","","",27,[[["i32x2",6]]]],[11,"from_bits","","",27,[[["u32x2",6]]]],[11,"from_bits","",""
,27,[[["f32x2",6]]]],[11,"from_bits","","",27,[[["m32x2",6]]]],[11,"from_bits","","",28,[[["i8x8",6]]]],[11,"from_bits","","",28,[[["u8x8",6]]]],[11,"from_bits","","",28,[[["m8x8",6]]]],[11,"from_bits","","",28,[[["i16x4",6]]]],[11,"from_bits","","",28,[[["m16x4",6]]]],[11,"from_bits","","",28,[[["i32x2",6]]]],[11,"from_bits","","",28,[[["u32x2",6]]]],[11,"from_bits","","",28,[[["f32x2",6]]]],[11,"from_bits","","",28,[[["m32x2",6]]]],[11,"from_bits","","",29,[[["m32x2",6]]]],[11,"from_bits","","",30,[[["i8x8",6]]]],[11,"from_bits","","",30,[[["u8x8",6]]]],[11,"from_bits","","",30,[[["m8x8",6]]]],[11,"from_bits","","",30,[[["i16x4",6]]]],[11,"from_bits","","",30,[[["u16x4",6]]]],[11,"from_bits","","",30,[[["m16x4",6]]]],[11,"from_bits","","",30,[[["u32x2",6]]]],[11,"from_bits","","",30,[[["f32x2",6]]]],[11,"from_bits","","",30,[[["m32x2",6]]]],[11,"from_bits","","",31,[[["i8x8",6]]]],[11,"from_bits","","",31,[[["u8x8",6]]]],[11,"from_bits","","",31,[[["m8x8",6]]]],[11,"from_bits","","",31,[[["i16x4",6]]]],[11,"from_bits","","",31,[[["u16x4",6]]]],[11,"from_bits","","",31,[[["m16x4",6]]]],[11,"from_bits","","",31,[[["i32x2",6]]]],[11,"from_bits","","",31,[[["f32x2",6]]]],[11,"from_bits","","",31,[[["m32x2",6]]]],[11,"from_bits","","",32,[[["i8x8",6]]]],[11,"from_bits","","",32,[[["u8x8",6]]]],[11,"from_bits","","",32,[[["m8x8",6]]]],[11,"from_bits","","",32,[[["i16x4",6]]]],[11,"from_bits","","",32,[[["u16x4",6]]]],[11,"from_bits","","",32,[[["m16x4",6]]]],[11,"from_bits","","",32,[[["i32x2",6]]]],[11,"from_bits","","",32,[[["u32x2",6]]]],[11,"from_bits","","",32,[[["m32x2",6]]]],[11,"from_bits","","",34,[[["u8x16",6]]]],[11,"from_bits","","",34,[[["m8x16",6]]]],[11,"from_bits","","",34,[[["i16x8",6]]]],[11,"from_bits","","",34,[[["u16x8",6]]]],[11,"from_bits","","",34,[[["m16x8",6]]]],[11,"from_bits","","",34,[[["i32x4",6]]]],[11,"from_bits","","",34,[[["u32x4",6]]]],[11,"from_bits","","",34,[[["f32x4",6]]]],[11,"from_bits","","",34,[[["m32x4",6]]]],[11,"from_bits","","",34,[[["i64x2",6]]]],[11,"from_bits","","",34,[[["u64x2",6]]]],[11,"from_bits","","",34,[[["f64x2",6]]]],[11,"from_bits","","",34,[[["m64x2",6]]]],[11,"from_bits","","",34,[[["i128x1",6]]]],[11,"from_bits","","",34,[[["u128x1",6]]]],[11,"from_bits","","",34,[[["m128x1",6]]]],[11,"from_bits","","",35,[[["i8x16",6]]]],[11,"from_bits","","",35,[[["m8x16",6]]]],[11,"from_bits","","",35,[[["i16x8",6]]]],[11,"from_bits","","",35,[[["u16x8",6]]]],[11,"from_bits","","",35,[[["m16x8",6]]]],[11,"from_bits","","",35,[[["i32x4",6]]]],[11,"from_bits","","",35,[[["u32x4",6]]]],[11,"from_bits","","",35,[[["f32x4",6]]]],[11,"from_bits","","",35,[[["m32x4",6]]]],[11,"from_bits","","",35,[[["i64x2",6]]]],[11,"from_bits","","",35,[[["u64x2",6]]]],[11,"from_bits","","",35,[[["f64x2",6]]]],[11,"from_bits","","",35,[[["m64x2",6]]]],[11,"from_bits","","",35,[[["i128x1",6]]]],[11,"from_bits","","",35,[[["u128x1",6]]]],[11,"from_bits","","",35,[[["m128x1",6]]]],[11,"from_bits","","",36,[[["m16x8",6]]]],[11,"from_bits","","",36,[[["m32x4",6]]]],[11,"from_bits","","",36,[[["m64x2",6]]]],[11,"from_bits","","",36,[[["m128x1",6]]]],[11,"from_bits","","",37,[[["i8x16",6]]]],[11,"from_bits","","",37,[[["u8x16",6]]]],[11,"from_bits","","",37,[[["m8x16",6]]]],[11,"from_bits","","",37,[[["u16x8",6]]]],[11,"from_bits","","",37,[[["m16x8",6]]]],[11,"from_bits","","",37,[[["i32x4",6]]]],[11,"from_bits","","",37,[[["u32x4",6]]]],[11,"from_bits","","",37,[[["f32x4",6]]]],[11,"from_bits","","",37,[[["m32x4",6]]]],[11,"from_bits","","",37,[[["i64x2",6]]]],[11,"from_b
its","","",37,[[["u64x2",6]]]],[11,"from_bits","","",37,[[["f64x2",6]]]],[11,"from_bits","","",37,[[["m64x2",6]]]],[11,"from_bits","","",37,[[["i128x1",6]]]],[11,"from_bits","","",37,[[["u128x1",6]]]],[11,"from_bits","","",37,[[["m128x1",6]]]],[11,"from_bits","","",38,[[["i8x16",6]]]],[11,"from_bits","","",38,[[["u8x16",6]]]],[11,"from_bits","","",38,[[["m8x16",6]]]],[11,"from_bits","","",38,[[["i16x8",6]]]],[11,"from_bits","","",38,[[["m16x8",6]]]],[11,"from_bits","","",38,[[["i32x4",6]]]],[11,"from_bits","","",38,[[["u32x4",6]]]],[11,"from_bits","","",38,[[["f32x4",6]]]],[11,"from_bits","","",38,[[["m32x4",6]]]],[11,"from_bits","","",38,[[["i64x2",6]]]],[11,"from_bits","","",38,[[["u64x2",6]]]],[11,"from_bits","","",38,[[["f64x2",6]]]],[11,"from_bits","","",38,[[["m64x2",6]]]],[11,"from_bits","","",38,[[["i128x1",6]]]],[11,"from_bits","","",38,[[["u128x1",6]]]],[11,"from_bits","","",38,[[["m128x1",6]]]],[11,"from_bits","","",39,[[["m32x4",6]]]],[11,"from_bits","","",39,[[["m64x2",6]]]],[11,"from_bits","","",39,[[["m128x1",6]]]],[11,"from_bits","","",40,[[["i8x16",6]]]],[11,"from_bits","","",40,[[["u8x16",6]]]],[11,"from_bits","","",40,[[["m8x16",6]]]],[11,"from_bits","","",40,[[["i16x8",6]]]],[11,"from_bits","","",40,[[["u16x8",6]]]],[11,"from_bits","","",40,[[["m16x8",6]]]],[11,"from_bits","","",40,[[["u32x4",6]]]],[11,"from_bits","","",40,[[["f32x4",6]]]],[11,"from_bits","","",40,[[["m32x4",6]]]],[11,"from_bits","","",40,[[["i64x2",6]]]],[11,"from_bits","","",40,[[["u64x2",6]]]],[11,"from_bits","","",40,[[["f64x2",6]]]],[11,"from_bits","","",40,[[["m64x2",6]]]],[11,"from_bits","","",40,[[["i128x1",6]]]],[11,"from_bits","","",40,[[["u128x1",6]]]],[11,"from_bits","","",40,[[["m128x1",6]]]],[11,"from_bits","","",41,[[["i8x16",6]]]],[11,"from_bits","","",41,[[["u8x16",6]]]],[11,"from_bits","","",41,[[["m8x16",6]]]],[11,"from_bits","","",41,[[["i16x8",6]]]],[11,"from_bits","","",41,[[["u16x8",6]]]],[11,"from_bits","","",41,[[["m16x8",6]]]],[11,"from_bits","","",41,[[["i32x4",6]]]],[11,"from_bits","","",41,[[["f32x4",6]]]],[11,"from_bits","","",41,[[["m32x4",6]]]],[11,"from_bits","","",41,[[["i64x2",6]]]],[11,"from_bits","","",41,[[["u64x2",6]]]],[11,"from_bits","","",41,[[["f64x2",6]]]],[11,"from_bits","","",41,[[["m64x2",6]]]],[11,"from_bits","","",41,[[["i128x1",6]]]],[11,"from_bits","","",41,[[["u128x1",6]]]],[11,"from_bits","","",41,[[["m128x1",6]]]],[11,"from_bits","","",42,[[["i8x16",6]]]],[11,"from_bits","","",42,[[["u8x16",6]]]],[11,"from_bits","","",42,[[["m8x16",6]]]],[11,"from_bits","","",42,[[["i16x8",6]]]],[11,"from_bits","","",42,[[["u16x8",6]]]],[11,"from_bits","","",42,[[["m16x8",6]]]],[11,"from_bits","","",42,[[["i32x4",6]]]],[11,"from_bits","","",42,[[["u32x4",6]]]],[11,"from_bits","","",42,[[["m32x4",6]]]],[11,"from_bits","","",42,[[["i64x2",6]]]],[11,"from_bits","","",42,[[["u64x2",6]]]],[11,"from_bits","","",42,[[["f64x2",6]]]],[11,"from_bits","","",42,[[["m64x2",6]]]],[11,"from_bits","","",42,[[["i128x1",6]]]],[11,"from_bits","","",42,[[["u128x1",6]]]],[11,"from_bits","","",42,[[["m128x1",6]]]],[11,"from_bits","","",43,[[["m64x2",6]]]],[11,"from_bits","","",43,[[["m128x1",6]]]],[11,"from_bits","","",44,[[["i8x16",6]]]],[11,"from_bits","","",44,[[["u8x16",6]]]],[11,"from_bits","","",44,[[["m8x16",6]]]],[11,"from_bits","","",44,[[["i16x8",6]]]],[11,"from_bits","","",44,[[["u16x8",6]]]],[11,"from_bits","","",44,[[["m16x8",6]]]],[11,"from_bits","","",44,[[["i32x4",6]]]],[11,"from_bits","","",44,[[["u32x4",6]]]],[11,"from_bits","","",44,[[["f32x4",6]]]],[11,"from_bits","",""
,44,[[["m32x4",6]]]],[11,"from_bits","","",44,[[["u64x2",6]]]],[11,"from_bits","","",44,[[["f64x2",6]]]],[11,"from_bits","","",44,[[["m64x2",6]]]],[11,"from_bits","","",44,[[["i128x1",6]]]],[11,"from_bits","","",44,[[["u128x1",6]]]],[11,"from_bits","","",44,[[["m128x1",6]]]],[11,"from_bits","","",45,[[["i8x16",6]]]],[11,"from_bits","","",45,[[["u8x16",6]]]],[11,"from_bits","","",45,[[["m8x16",6]]]],[11,"from_bits","","",45,[[["i16x8",6]]]],[11,"from_bits","","",45,[[["u16x8",6]]]],[11,"from_bits","","",45,[[["m16x8",6]]]],[11,"from_bits","","",45,[[["i32x4",6]]]],[11,"from_bits","","",45,[[["u32x4",6]]]],[11,"from_bits","","",45,[[["f32x4",6]]]],[11,"from_bits","","",45,[[["m32x4",6]]]],[11,"from_bits","","",45,[[["i64x2",6]]]],[11,"from_bits","","",45,[[["f64x2",6]]]],[11,"from_bits","","",45,[[["m64x2",6]]]],[11,"from_bits","","",45,[[["i128x1",6]]]],[11,"from_bits","","",45,[[["u128x1",6]]]],[11,"from_bits","","",45,[[["m128x1",6]]]],[11,"from_bits","","",46,[[["i8x16",6]]]],[11,"from_bits","","",46,[[["u8x16",6]]]],[11,"from_bits","","",46,[[["m8x16",6]]]],[11,"from_bits","","",46,[[["i16x8",6]]]],[11,"from_bits","","",46,[[["u16x8",6]]]],[11,"from_bits","","",46,[[["m16x8",6]]]],[11,"from_bits","","",46,[[["i32x4",6]]]],[11,"from_bits","","",46,[[["u32x4",6]]]],[11,"from_bits","","",46,[[["f32x4",6]]]],[11,"from_bits","","",46,[[["m32x4",6]]]],[11,"from_bits","","",46,[[["i64x2",6]]]],[11,"from_bits","","",46,[[["u64x2",6]]]],[11,"from_bits","","",46,[[["m64x2",6]]]],[11,"from_bits","","",46,[[["i128x1",6]]]],[11,"from_bits","","",46,[[["u128x1",6]]]],[11,"from_bits","","",46,[[["m128x1",6]]]],[11,"from_bits","","",47,[[["m128x1",6]]]],[11,"from_bits","","",51,[[["i8x16",6]]]],[11,"from_bits","","",51,[[["u8x16",6]]]],[11,"from_bits","","",51,[[["m8x16",6]]]],[11,"from_bits","","",51,[[["i16x8",6]]]],[11,"from_bits","","",51,[[["u16x8",6]]]],[11,"from_bits","","",51,[[["m16x8",6]]]],[11,"from_bits","","",51,[[["i32x4",6]]]],[11,"from_bits","","",51,[[["u32x4",6]]]],[11,"from_bits","","",51,[[["f32x4",6]]]],[11,"from_bits","","",51,[[["m32x4",6]]]],[11,"from_bits","","",51,[[["i64x2",6]]]],[11,"from_bits","","",51,[[["u64x2",6]]]],[11,"from_bits","","",51,[[["f64x2",6]]]],[11,"from_bits","","",51,[[["m64x2",6]]]],[11,"from_bits","","",51,[[["u128x1",6]]]],[11,"from_bits","","",51,[[["m128x1",6]]]],[11,"from_bits","","",52,[[["i8x16",6]]]],[11,"from_bits","","",52,[[["u8x16",6]]]],[11,"from_bits","","",52,[[["m8x16",6]]]],[11,"from_bits","","",52,[[["i16x8",6]]]],[11,"from_bits","","",52,[[["u16x8",6]]]],[11,"from_bits","","",52,[[["m16x8",6]]]],[11,"from_bits","","",52,[[["i32x4",6]]]],[11,"from_bits","","",52,[[["u32x4",6]]]],[11,"from_bits","","",52,[[["f32x4",6]]]],[11,"from_bits","","",52,[[["m32x4",6]]]],[11,"from_bits","","",52,[[["i64x2",6]]]],[11,"from_bits","","",52,[[["u64x2",6]]]],[11,"from_bits","","",52,[[["f64x2",6]]]],[11,"from_bits","","",52,[[["m64x2",6]]]],[11,"from_bits","","",52,[[["i128x1",6]]]],[11,"from_bits","","",52,[[["m128x1",6]]]],[11,"from_bits","","",54,[[["u8x32",6]]]],[11,"from_bits","","",54,[[["m8x32",6]]]],[11,"from_bits","","",54,[[["i16x16",6]]]],[11,"from_bits","","",54,[[["u16x16",6]]]],[11,"from_bits","","",54,[[["m16x16",6]]]],[11,"from_bits","","",54,[[["i32x8",6]]]],[11,"from_bits","","",54,[[["u32x8",6]]]],[11,"from_bits","","",54,[[["f32x8",6]]]],[11,"from_bits","","",54,[[["m32x8",6]]]],[11,"from_bits","","",54,[[["i64x4",6]]]],[11,"from_bits","","",54,[[["u64x4",6]]]],[11,"from_bits","","",54,[[["f64x4",6]]]],[11,"from_bits","","",54,[[["m6
4x4",6]]]],[11,"from_bits","","",54,[[["i128x2",6]]]],[11,"from_bits","","",54,[[["u128x2",6]]]],[11,"from_bits","","",54,[[["m128x2",6]]]],[11,"from_bits","","",55,[[["i8x32",6]]]],[11,"from_bits","","",55,[[["m8x32",6]]]],[11,"from_bits","","",55,[[["i16x16",6]]]],[11,"from_bits","","",55,[[["u16x16",6]]]],[11,"from_bits","","",55,[[["m16x16",6]]]],[11,"from_bits","","",55,[[["i32x8",6]]]],[11,"from_bits","","",55,[[["u32x8",6]]]],[11,"from_bits","","",55,[[["f32x8",6]]]],[11,"from_bits","","",55,[[["m32x8",6]]]],[11,"from_bits","","",55,[[["i64x4",6]]]],[11,"from_bits","","",55,[[["u64x4",6]]]],[11,"from_bits","","",55,[[["f64x4",6]]]],[11,"from_bits","","",55,[[["m64x4",6]]]],[11,"from_bits","","",55,[[["i128x2",6]]]],[11,"from_bits","","",55,[[["u128x2",6]]]],[11,"from_bits","","",55,[[["m128x2",6]]]],[11,"from_bits","","",56,[[["m16x16",6]]]],[11,"from_bits","","",56,[[["m32x8",6]]]],[11,"from_bits","","",56,[[["m64x4",6]]]],[11,"from_bits","","",56,[[["m128x2",6]]]],[11,"from_bits","","",57,[[["i8x32",6]]]],[11,"from_bits","","",57,[[["u8x32",6]]]],[11,"from_bits","","",57,[[["m8x32",6]]]],[11,"from_bits","","",57,[[["u16x16",6]]]],[11,"from_bits","","",57,[[["m16x16",6]]]],[11,"from_bits","","",57,[[["i32x8",6]]]],[11,"from_bits","","",57,[[["u32x8",6]]]],[11,"from_bits","","",57,[[["f32x8",6]]]],[11,"from_bits","","",57,[[["m32x8",6]]]],[11,"from_bits","","",57,[[["i64x4",6]]]],[11,"from_bits","","",57,[[["u64x4",6]]]],[11,"from_bits","","",57,[[["f64x4",6]]]],[11,"from_bits","","",57,[[["m64x4",6]]]],[11,"from_bits","","",57,[[["i128x2",6]]]],[11,"from_bits","","",57,[[["u128x2",6]]]],[11,"from_bits","","",57,[[["m128x2",6]]]],[11,"from_bits","","",58,[[["i8x32",6]]]],[11,"from_bits","","",58,[[["u8x32",6]]]],[11,"from_bits","","",58,[[["m8x32",6]]]],[11,"from_bits","","",58,[[["i16x16",6]]]],[11,"from_bits","","",58,[[["m16x16",6]]]],[11,"from_bits","","",58,[[["i32x8",6]]]],[11,"from_bits","","",58,[[["u32x8",6]]]],[11,"from_bits","","",58,[[["f32x8",6]]]],[11,"from_bits","","",58,[[["m32x8",6]]]],[11,"from_bits","","",58,[[["i64x4",6]]]],[11,"from_bits","","",58,[[["u64x4",6]]]],[11,"from_bits","","",58,[[["f64x4",6]]]],[11,"from_bits","","",58,[[["m64x4",6]]]],[11,"from_bits","","",58,[[["i128x2",6]]]],[11,"from_bits","","",58,[[["u128x2",6]]]],[11,"from_bits","","",58,[[["m128x2",6]]]],[11,"from_bits","","",59,[[["m32x8",6]]]],[11,"from_bits","","",59,[[["m64x4",6]]]],[11,"from_bits","","",59,[[["m128x2",6]]]],[11,"from_bits","","",60,[[["i8x32",6]]]],[11,"from_bits","","",60,[[["u8x32",6]]]],[11,"from_bits","","",60,[[["m8x32",6]]]],[11,"from_bits","","",60,[[["i16x16",6]]]],[11,"from_bits","","",60,[[["u16x16",6]]]],[11,"from_bits","","",60,[[["m16x16",6]]]],[11,"from_bits","","",60,[[["u32x8",6]]]],[11,"from_bits","","",60,[[["f32x8",6]]]],[11,"from_bits","","",60,[[["m32x8",6]]]],[11,"from_bits","","",60,[[["i64x4",6]]]],[11,"from_bits","","",60,[[["u64x4",6]]]],[11,"from_bits","","",60,[[["f64x4",6]]]],[11,"from_bits","","",60,[[["m64x4",6]]]],[11,"from_bits","","",60,[[["i128x2",6]]]],[11,"from_bits","","",60,[[["u128x2",6]]]],[11,"from_bits","","",60,[[["m128x2",6]]]],[11,"from_bits","","",61,[[["i8x32",6]]]],[11,"from_bits","","",61,[[["u8x32",6]]]],[11,"from_bits","","",61,[[["m8x32",6]]]],[11,"from_bits","","",61,[[["i16x16",6]]]],[11,"from_bits","","",61,[[["u16x16",6]]]],[11,"from_bits","","",61,[[["m16x16",6]]]],[11,"from_bits","","",61,[[["i32x8",6]]]],[11,"from_bits","","",61,[[["f32x8",6]]]],[11,"from_bits","","",61,[[["m32x8",6]]]],[11,"from_bits","","",61,[[
["i64x4",6]]]],[11,"from_bits","","",61,[[["u64x4",6]]]],[11,"from_bits","","",61,[[["f64x4",6]]]],[11,"from_bits","","",61,[[["m64x4",6]]]],[11,"from_bits","","",61,[[["i128x2",6]]]],[11,"from_bits","","",61,[[["u128x2",6]]]],[11,"from_bits","","",61,[[["m128x2",6]]]],[11,"from_bits","","",62,[[["i8x32",6]]]],[11,"from_bits","","",62,[[["u8x32",6]]]],[11,"from_bits","","",62,[[["m8x32",6]]]],[11,"from_bits","","",62,[[["i16x16",6]]]],[11,"from_bits","","",62,[[["u16x16",6]]]],[11,"from_bits","","",62,[[["m16x16",6]]]],[11,"from_bits","","",62,[[["i32x8",6]]]],[11,"from_bits","","",62,[[["u32x8",6]]]],[11,"from_bits","","",62,[[["m32x8",6]]]],[11,"from_bits","","",62,[[["i64x4",6]]]],[11,"from_bits","","",62,[[["u64x4",6]]]],[11,"from_bits","","",62,[[["f64x4",6]]]],[11,"from_bits","","",62,[[["m64x4",6]]]],[11,"from_bits","","",62,[[["i128x2",6]]]],[11,"from_bits","","",62,[[["u128x2",6]]]],[11,"from_bits","","",62,[[["m128x2",6]]]],[11,"from_bits","","",63,[[["m64x4",6]]]],[11,"from_bits","","",63,[[["m128x2",6]]]],[11,"from_bits","","",64,[[["i8x32",6]]]],[11,"from_bits","","",64,[[["u8x32",6]]]],[11,"from_bits","","",64,[[["m8x32",6]]]],[11,"from_bits","","",64,[[["i16x16",6]]]],[11,"from_bits","","",64,[[["u16x16",6]]]],[11,"from_bits","","",64,[[["m16x16",6]]]],[11,"from_bits","","",64,[[["i32x8",6]]]],[11,"from_bits","","",64,[[["u32x8",6]]]],[11,"from_bits","","",64,[[["f32x8",6]]]],[11,"from_bits","","",64,[[["m32x8",6]]]],[11,"from_bits","","",64,[[["u64x4",6]]]],[11,"from_bits","","",64,[[["f64x4",6]]]],[11,"from_bits","","",64,[[["m64x4",6]]]],[11,"from_bits","","",64,[[["i128x2",6]]]],[11,"from_bits","","",64,[[["u128x2",6]]]],[11,"from_bits","","",64,[[["m128x2",6]]]],[11,"from_bits","","",65,[[["i8x32",6]]]],[11,"from_bits","","",65,[[["u8x32",6]]]],[11,"from_bits","","",65,[[["m8x32",6]]]],[11,"from_bits","","",65,[[["i16x16",6]]]],[11,"from_bits","","",65,[[["u16x16",6]]]],[11,"from_bits","","",65,[[["m16x16",6]]]],[11,"from_bits","","",65,[[["i32x8",6]]]],[11,"from_bits","","",65,[[["u32x8",6]]]],[11,"from_bits","","",65,[[["f32x8",6]]]],[11,"from_bits","","",65,[[["m32x8",6]]]],[11,"from_bits","","",65,[[["i64x4",6]]]],[11,"from_bits","","",65,[[["f64x4",6]]]],[11,"from_bits","","",65,[[["m64x4",6]]]],[11,"from_bits","","",65,[[["i128x2",6]]]],[11,"from_bits","","",65,[[["u128x2",6]]]],[11,"from_bits","","",65,[[["m128x2",6]]]],[11,"from_bits","","",66,[[["i8x32",6]]]],[11,"from_bits","","",66,[[["u8x32",6]]]],[11,"from_bits","","",66,[[["m8x32",6]]]],[11,"from_bits","","",66,[[["i16x16",6]]]],[11,"from_bits","","",66,[[["u16x16",6]]]],[11,"from_bits","","",66,[[["m16x16",6]]]],[11,"from_bits","","",66,[[["i32x8",6]]]],[11,"from_bits","","",66,[[["u32x8",6]]]],[11,"from_bits","","",66,[[["f32x8",6]]]],[11,"from_bits","","",66,[[["m32x8",6]]]],[11,"from_bits","","",66,[[["i64x4",6]]]],[11,"from_bits","","",66,[[["u64x4",6]]]],[11,"from_bits","","",66,[[["m64x4",6]]]],[11,"from_bits","","",66,[[["i128x2",6]]]],[11,"from_bits","","",66,[[["u128x2",6]]]],[11,"from_bits","","",66,[[["m128x2",6]]]],[11,"from_bits","","",67,[[["m128x2",6]]]],[11,"from_bits","","",68,[[["i8x32",6]]]],[11,"from_bits","","",68,[[["u8x32",6]]]],[11,"from_bits","","",68,[[["m8x32",6]]]],[11,"from_bits","","",68,[[["i16x16",6]]]],[11,"from_bits","","",68,[[["u16x16",6]]]],[11,"from_bits","","",68,[[["m16x16",6]]]],[11,"from_bits","","",68,[[["i32x8",6]]]],[11,"from_bits","","",68,[[["u32x8",6]]]],[11,"from_bits","","",68,[[["f32x8",6]]]],[11,"from_bits","","",68,[[["m32x8",6]]]],[11,"from_bits","","",
68,[[["i64x4",6]]]],[11,"from_bits","","",68,[[["u64x4",6]]]],[11,"from_bits","","",68,[[["f64x4",6]]]],[11,"from_bits","","",68,[[["m64x4",6]]]],[11,"from_bits","","",68,[[["u128x2",6]]]],[11,"from_bits","","",68,[[["m128x2",6]]]],[11,"from_bits","","",69,[[["i8x32",6]]]],[11,"from_bits","","",69,[[["u8x32",6]]]],[11,"from_bits","","",69,[[["m8x32",6]]]],[11,"from_bits","","",69,[[["i16x16",6]]]],[11,"from_bits","","",69,[[["u16x16",6]]]],[11,"from_bits","","",69,[[["m16x16",6]]]],[11,"from_bits","","",69,[[["i32x8",6]]]],[11,"from_bits","","",69,[[["u32x8",6]]]],[11,"from_bits","","",69,[[["f32x8",6]]]],[11,"from_bits","","",69,[[["m32x8",6]]]],[11,"from_bits","","",69,[[["i64x4",6]]]],[11,"from_bits","","",69,[[["u64x4",6]]]],[11,"from_bits","","",69,[[["f64x4",6]]]],[11,"from_bits","","",69,[[["m64x4",6]]]],[11,"from_bits","","",69,[[["i128x2",6]]]],[11,"from_bits","","",69,[[["m128x2",6]]]],[11,"from_bits","","",74,[[["u8x64",6]]]],[11,"from_bits","","",74,[[["m8x64",6]]]],[11,"from_bits","","",74,[[["i16x32",6]]]],[11,"from_bits","","",74,[[["u16x32",6]]]],[11,"from_bits","","",74,[[["m16x32",6]]]],[11,"from_bits","","",74,[[["i32x16",6]]]],[11,"from_bits","","",74,[[["u32x16",6]]]],[11,"from_bits","","",74,[[["f32x16",6]]]],[11,"from_bits","","",74,[[["m32x16",6]]]],[11,"from_bits","","",74,[[["i64x8",6]]]],[11,"from_bits","","",74,[[["u64x8",6]]]],[11,"from_bits","","",74,[[["f64x8",6]]]],[11,"from_bits","","",74,[[["m64x8",6]]]],[11,"from_bits","","",74,[[["i128x4",6]]]],[11,"from_bits","","",74,[[["u128x4",6]]]],[11,"from_bits","","",74,[[["m128x4",6]]]],[11,"from_bits","","",75,[[["i8x64",6]]]],[11,"from_bits","","",75,[[["m8x64",6]]]],[11,"from_bits","","",75,[[["i16x32",6]]]],[11,"from_bits","","",75,[[["u16x32",6]]]],[11,"from_bits","","",75,[[["m16x32",6]]]],[11,"from_bits","","",75,[[["i32x16",6]]]],[11,"from_bits","","",75,[[["u32x16",6]]]],[11,"from_bits","","",75,[[["f32x16",6]]]],[11,"from_bits","","",75,[[["m32x16",6]]]],[11,"from_bits","","",75,[[["i64x8",6]]]],[11,"from_bits","","",75,[[["u64x8",6]]]],[11,"from_bits","","",75,[[["f64x8",6]]]],[11,"from_bits","","",75,[[["m64x8",6]]]],[11,"from_bits","","",75,[[["i128x4",6]]]],[11,"from_bits","","",75,[[["u128x4",6]]]],[11,"from_bits","","",75,[[["m128x4",6]]]],[11,"from_bits","","",76,[[["m16x32",6]]]],[11,"from_bits","","",76,[[["m32x16",6]]]],[11,"from_bits","","",76,[[["m64x8",6]]]],[11,"from_bits","","",76,[[["m128x4",6]]]],[11,"from_bits","","",77,[[["i8x64",6]]]],[11,"from_bits","","",77,[[["u8x64",6]]]],[11,"from_bits","","",77,[[["m8x64",6]]]],[11,"from_bits","","",77,[[["u16x32",6]]]],[11,"from_bits","","",77,[[["m16x32",6]]]],[11,"from_bits","","",77,[[["i32x16",6]]]],[11,"from_bits","","",77,[[["u32x16",6]]]],[11,"from_bits","","",77,[[["f32x16",6]]]],[11,"from_bits","","",77,[[["m32x16",6]]]],[11,"from_bits","","",77,[[["i64x8",6]]]],[11,"from_bits","","",77,[[["u64x8",6]]]],[11,"from_bits","","",77,[[["f64x8",6]]]],[11,"from_bits","","",77,[[["m64x8",6]]]],[11,"from_bits","","",77,[[["i128x4",6]]]],[11,"from_bits","","",77,[[["u128x4",6]]]],[11,"from_bits","","",77,[[["m128x4",6]]]],[11,"from_bits","","",78,[[["i8x64",6]]]],[11,"from_bits","","",78,[[["u8x64",6]]]],[11,"from_bits","","",78,[[["m8x64",6]]]],[11,"from_bits","","",78,[[["i16x32",6]]]],[11,"from_bits","","",78,[[["m16x32",6]]]],[11,"from_bits","","",78,[[["i32x16",6]]]],[11,"from_bits","","",78,[[["u32x16",6]]]],[11,"from_bits","","",78,[[["f32x16",6]]]],[11,"from_bits","","",78,[[["m32x16",6]]]],[11,"from_bits","","",78,[[["i64x8",6]]]],[11,
"from_bits","","",78,[[["u64x8",6]]]],[11,"from_bits","","",78,[[["f64x8",6]]]],[11,"from_bits","","",78,[[["m64x8",6]]]],[11,"from_bits","","",78,[[["i128x4",6]]]],[11,"from_bits","","",78,[[["u128x4",6]]]],[11,"from_bits","","",78,[[["m128x4",6]]]],[11,"from_bits","","",79,[[["m32x16",6]]]],[11,"from_bits","","",79,[[["m64x8",6]]]],[11,"from_bits","","",79,[[["m128x4",6]]]],[11,"from_bits","","",80,[[["i8x64",6]]]],[11,"from_bits","","",80,[[["u8x64",6]]]],[11,"from_bits","","",80,[[["m8x64",6]]]],[11,"from_bits","","",80,[[["i16x32",6]]]],[11,"from_bits","","",80,[[["u16x32",6]]]],[11,"from_bits","","",80,[[["m16x32",6]]]],[11,"from_bits","","",80,[[["u32x16",6]]]],[11,"from_bits","","",80,[[["f32x16",6]]]],[11,"from_bits","","",80,[[["m32x16",6]]]],[11,"from_bits","","",80,[[["i64x8",6]]]],[11,"from_bits","","",80,[[["u64x8",6]]]],[11,"from_bits","","",80,[[["f64x8",6]]]],[11,"from_bits","","",80,[[["m64x8",6]]]],[11,"from_bits","","",80,[[["i128x4",6]]]],[11,"from_bits","","",80,[[["u128x4",6]]]],[11,"from_bits","","",80,[[["m128x4",6]]]],[11,"from_bits","","",81,[[["i8x64",6]]]],[11,"from_bits","","",81,[[["u8x64",6]]]],[11,"from_bits","","",81,[[["m8x64",6]]]],[11,"from_bits","","",81,[[["i16x32",6]]]],[11,"from_bits","","",81,[[["u16x32",6]]]],[11,"from_bits","","",81,[[["m16x32",6]]]],[11,"from_bits","","",81,[[["i32x16",6]]]],[11,"from_bits","","",81,[[["f32x16",6]]]],[11,"from_bits","","",81,[[["m32x16",6]]]],[11,"from_bits","","",81,[[["i64x8",6]]]],[11,"from_bits","","",81,[[["u64x8",6]]]],[11,"from_bits","","",81,[[["f64x8",6]]]],[11,"from_bits","","",81,[[["m64x8",6]]]],[11,"from_bits","","",81,[[["i128x4",6]]]],[11,"from_bits","","",81,[[["u128x4",6]]]],[11,"from_bits","","",81,[[["m128x4",6]]]],[11,"from_bits","","",82,[[["i8x64",6]]]],[11,"from_bits","","",82,[[["u8x64",6]]]],[11,"from_bits","","",82,[[["m8x64",6]]]],[11,"from_bits","","",82,[[["i16x32",6]]]],[11,"from_bits","","",82,[[["u16x32",6]]]],[11,"from_bits","","",82,[[["m16x32",6]]]],[11,"from_bits","","",82,[[["i32x16",6]]]],[11,"from_bits","","",82,[[["u32x16",6]]]],[11,"from_bits","","",82,[[["m32x16",6]]]],[11,"from_bits","","",82,[[["i64x8",6]]]],[11,"from_bits","","",82,[[["u64x8",6]]]],[11,"from_bits","","",82,[[["f64x8",6]]]],[11,"from_bits","","",82,[[["m64x8",6]]]],[11,"from_bits","","",82,[[["i128x4",6]]]],[11,"from_bits","","",82,[[["u128x4",6]]]],[11,"from_bits","","",82,[[["m128x4",6]]]],[11,"from_bits","","",83,[[["m64x8",6]]]],[11,"from_bits","","",83,[[["m128x4",6]]]],[11,"from_bits","","",84,[[["i8x64",6]]]],[11,"from_bits","","",84,[[["u8x64",6]]]],[11,"from_bits","","",84,[[["m8x64",6]]]],[11,"from_bits","","",84,[[["i16x32",6]]]],[11,"from_bits","","",84,[[["u16x32",6]]]],[11,"from_bits","","",84,[[["m16x32",6]]]],[11,"from_bits","","",84,[[["i32x16",6]]]],[11,"from_bits","","",84,[[["u32x16",6]]]],[11,"from_bits","","",84,[[["f32x16",6]]]],[11,"from_bits","","",84,[[["m32x16",6]]]],[11,"from_bits","","",84,[[["u64x8",6]]]],[11,"from_bits","","",84,[[["f64x8",6]]]],[11,"from_bits","","",84,[[["m64x8",6]]]],[11,"from_bits","","",84,[[["i128x4",6]]]],[11,"from_bits","","",84,[[["u128x4",6]]]],[11,"from_bits","","",84,[[["m128x4",6]]]],[11,"from_bits","","",85,[[["i8x64",6]]]],[11,"from_bits","","",85,[[["u8x64",6]]]],[11,"from_bits","","",85,[[["m8x64",6]]]],[11,"from_bits","","",85,[[["i16x32",6]]]],[11,"from_bits","","",85,[[["u16x32",6]]]],[11,"from_bits","","",85,[[["m16x32",6]]]],[11,"from_bits","","",85,[[["i32x16",6]]]],[11,"from_bits","","",85,[[["u32x16",6]]]],[11,"from_bits","","",85,
[[["f32x16",6]]]],[11,"from_bits","","",85,[[["m32x16",6]]]],[11,"from_bits","","",85,[[["i64x8",6]]]],[11,"from_bits","","",85,[[["f64x8",6]]]],[11,"from_bits","","",85,[[["m64x8",6]]]],[11,"from_bits","","",85,[[["i128x4",6]]]],[11,"from_bits","","",85,[[["u128x4",6]]]],[11,"from_bits","","",85,[[["m128x4",6]]]],[11,"from_bits","","",86,[[["i8x64",6]]]],[11,"from_bits","","",86,[[["u8x64",6]]]],[11,"from_bits","","",86,[[["m8x64",6]]]],[11,"from_bits","","",86,[[["i16x32",6]]]],[11,"from_bits","","",86,[[["u16x32",6]]]],[11,"from_bits","","",86,[[["m16x32",6]]]],[11,"from_bits","","",86,[[["i32x16",6]]]],[11,"from_bits","","",86,[[["u32x16",6]]]],[11,"from_bits","","",86,[[["f32x16",6]]]],[11,"from_bits","","",86,[[["m32x16",6]]]],[11,"from_bits","","",86,[[["i64x8",6]]]],[11,"from_bits","","",86,[[["u64x8",6]]]],[11,"from_bits","","",86,[[["m64x8",6]]]],[11,"from_bits","","",86,[[["i128x4",6]]]],[11,"from_bits","","",86,[[["u128x4",6]]]],[11,"from_bits","","",86,[[["m128x4",6]]]],[11,"from_bits","","",87,[[["m128x4",6]]]],[11,"from_bits","","",88,[[["i8x64",6]]]],[11,"from_bits","","",88,[[["u8x64",6]]]],[11,"from_bits","","",88,[[["m8x64",6]]]],[11,"from_bits","","",88,[[["i16x32",6]]]],[11,"from_bits","","",88,[[["u16x32",6]]]],[11,"from_bits","","",88,[[["m16x32",6]]]],[11,"from_bits","","",88,[[["i32x16",6]]]],[11,"from_bits","","",88,[[["u32x16",6]]]],[11,"from_bits","","",88,[[["f32x16",6]]]],[11,"from_bits","","",88,[[["m32x16",6]]]],[11,"from_bits","","",88,[[["i64x8",6]]]],[11,"from_bits","","",88,[[["u64x8",6]]]],[11,"from_bits","","",88,[[["f64x8",6]]]],[11,"from_bits","","",88,[[["m64x8",6]]]],[11,"from_bits","","",88,[[["u128x4",6]]]],[11,"from_bits","","",88,[[["m128x4",6]]]],[11,"from_bits","","",89,[[["i8x64",6]]]],[11,"from_bits","","",89,[[["u8x64",6]]]],[11,"from_bits","","",89,[[["m8x64",6]]]],[11,"from_bits","","",89,[[["i16x32",6]]]],[11,"from_bits","","",89,[[["u16x32",6]]]],[11,"from_bits","","",89,[[["m16x32",6]]]],[11,"from_bits","","",89,[[["i32x16",6]]]],[11,"from_bits","","",89,[[["u32x16",6]]]],[11,"from_bits","","",89,[[["f32x16",6]]]],[11,"from_bits","","",89,[[["m32x16",6]]]],[11,"from_bits","","",89,[[["i64x8",6]]]],[11,"from_bits","","",89,[[["u64x8",6]]]],[11,"from_bits","","",89,[[["f64x8",6]]]],[11,"from_bits","","",89,[[["m64x8",6]]]],[11,"from_bits","","",89,[[["i128x4",6]]]],[11,"from_bits","","",89,[[["m128x4",6]]]],[11,"from_bits","","",24,[[["__m64",3]]]],[11,"from_bits","","",25,[[["__m64",3]]]],[11,"from_bits","","",27,[[["__m64",3]]]],[11,"from_bits","","",28,[[["__m64",3]]]],[11,"from_bits","","",30,[[["__m64",3]]]],[11,"from_bits","","",31,[[["__m64",3]]]],[11,"from_bits","","",32,[[["__m64",3]]]],[11,"from_bits","","",34,[[["__m128",3]]]],[11,"from_bits","","",35,[[["__m128",3]]]],[11,"from_bits","","",37,[[["__m128",3]]]],[11,"from_bits","","",38,[[["__m128",3]]]],[11,"from_bits","","",40,[[["__m128",3]]]],[11,"from_bits","","",41,[[["__m128",3]]]],[11,"from_bits","","",42,[[["__m128",3]]]],[11,"from_bits","","",44,[[["__m128",3]]]],[11,"from_bits","","",45,[[["__m128",3]]]],[11,"from_bits","","",46,[[["__m128",3]]]],[11,"from_bits","","",51,[[["__m128",3]]]],[11,"from_bits","","",52,[[["__m128",3]]]],[11,"from_bits","","",34,[[["__m128i",3]]]],[11,"from_bits","","",35,[[["__m128i",3]]]],[11,"from_bits","","",37,[[["__m128i",3]]]],[11,"from_bits","","",38,[[["__m128i",3]]]],[11,"from_bits","","",40,[[["__m128i",3]]]],[11,"from_bits","","",41,[[["__m128i",3]]]],[11,"from_bits","","",42,[[["__m128i",3]]]],[11,"from_bits","","",44,[[["__m12
8i",3]]]],[11,"from_bits","","",45,[[["__m128i",3]]]],[11,"from_bits","","",46,[[["__m128i",3]]]],[11,"from_bits","","",51,[[["__m128i",3]]]],[11,"from_bits","","",52,[[["__m128i",3]]]],[11,"from_bits","","",34,[[["__m128d",3]]]],[11,"from_bits","","",35,[[["__m128d",3]]]],[11,"from_bits","","",37,[[["__m128d",3]]]],[11,"from_bits","","",38,[[["__m128d",3]]]],[11,"from_bits","","",40,[[["__m128d",3]]]],[11,"from_bits","","",41,[[["__m128d",3]]]],[11,"from_bits","","",42,[[["__m128d",3]]]],[11,"from_bits","","",44,[[["__m128d",3]]]],[11,"from_bits","","",45,[[["__m128d",3]]]],[11,"from_bits","","",46,[[["__m128d",3]]]],[11,"from_bits","","",51,[[["__m128d",3]]]],[11,"from_bits","","",52,[[["__m128d",3]]]],[11,"from_bits","","",54,[[["__m256",3]]]],[11,"from_bits","","",55,[[["__m256",3]]]],[11,"from_bits","","",57,[[["__m256",3]]]],[11,"from_bits","","",58,[[["__m256",3]]]],[11,"from_bits","","",60,[[["__m256",3]]]],[11,"from_bits","","",61,[[["__m256",3]]]],[11,"from_bits","","",62,[[["__m256",3]]]],[11,"from_bits","","",64,[[["__m256",3]]]],[11,"from_bits","","",65,[[["__m256",3]]]],[11,"from_bits","","",66,[[["__m256",3]]]],[11,"from_bits","","",68,[[["__m256",3]]]],[11,"from_bits","","",69,[[["__m256",3]]]],[11,"from_bits","","",54,[[["__m256i",3]]]],[11,"from_bits","","",55,[[["__m256i",3]]]],[11,"from_bits","","",57,[[["__m256i",3]]]],[11,"from_bits","","",58,[[["__m256i",3]]]],[11,"from_bits","","",60,[[["__m256i",3]]]],[11,"from_bits","","",61,[[["__m256i",3]]]],[11,"from_bits","","",62,[[["__m256i",3]]]],[11,"from_bits","","",64,[[["__m256i",3]]]],[11,"from_bits","","",65,[[["__m256i",3]]]],[11,"from_bits","","",66,[[["__m256i",3]]]],[11,"from_bits","","",68,[[["__m256i",3]]]],[11,"from_bits","","",69,[[["__m256i",3]]]],[11,"from_bits","","",54,[[["__m256d",3]]]],[11,"from_bits","","",55,[[["__m256d",3]]]],[11,"from_bits","","",57,[[["__m256d",3]]]],[11,"from_bits","","",58,[[["__m256d",3]]]],[11,"from_bits","","",60,[[["__m256d",3]]]],[11,"from_bits","","",61,[[["__m256d",3]]]],[11,"from_bits","","",62,[[["__m256d",3]]]],[11,"from_bits","","",64,[[["__m256d",3]]]],[11,"from_bits","","",65,[[["__m256d",3]]]],[11,"from_bits","","",66,[[["__m256d",3]]]],[11,"from_bits","","",68,[[["__m256d",3]]]],[11,"from_bits","","",69,[[["__m256d",3]]]],[11,"test","","",0,[[]]],[11,"test","","",1,[[]]],[11,"test","","",2,[[]]],[11,"test","","",3,[[]]],[11,"test","","",4,[[]]],[11,"test","","",5,[[]]],[11,"from","","",15,[[]]],[11,"from","","",16,[[]]],[11,"from","","",17,[[]]],[11,"from","","",17,[[["m16x2",6]]]],[11,"from","","",17,[[["m32x2",6]]]],[11,"from","","",17,[[["m64x2",6]]]],[11,"from","","",17,[[["m128x2",6]]]],[11,"from","","",18,[[]]],[11,"from","","",19,[[]]],[11,"from","","",20,[[]]],[11,"from","","",20,[[["m16x4",6]]]],[11,"from","","",20,[[["m32x4",6]]]],[11,"from","","",20,[[["m64x4",6]]]],[11,"from","","",21,[[]]],[11,"from","","",21,[[["i8x2",6]]]],[11,"from","","",21,[[["u8x2",6]]]],[11,"from","","",22,[[]]],[11,"from","","",22,[[["u8x2",6]]]],[11,"from","","",23,[[]]],[11,"from","","",23,[[["m8x2",6]]]],[11,"from","","",23,[[["m32x2",6]]]],[11,"from","","",23,[[["m64x2",6]]]],[11,"from","","",23,[[["m128x2",6]]]],[11,"from","","",24,[[]]],[11,"from","","",25,[[]]],[11,"from","","",26,[[]]],[11,"from","","",26,[[["m16x8",6]]]],[11,"from","","",26,[[["m32x8",6]]]],[11,"from","","",27,[[]]],[11,"from","","",27,[[["i8x4",6]]]],[11,"from","","",27,[[["u8x4",6]]]],[11,"from","","",28,[[]]],[11,"from","","",28,[[["u8x4",6]]]],[11,"from","","",29,[[]]],[11,"from","","",29,[[["m8x4",6
]]]],[11,"from","","",29,[[["m32x4",6]]]],[11,"from","","",29,[[["m64x4",6]]]],[11,"from","","",30,[[]]],[11,"from","","",30,[[["i8x2",6]]]],[11,"from","","",30,[[["u8x2",6]]]],[11,"from","","",30,[[["i16x2",6]]]],[11,"from","","",30,[[["u16x2",6]]]],[11,"from","","",31,[[]]],[11,"from","","",31,[[["u8x2",6]]]],[11,"from","","",31,[[["u16x2",6]]]],[11,"from","","",33,[[]]],[11,"from","","",33,[[["m8x2",6]]]],[11,"from","","",33,[[["m16x2",6]]]],[11,"from","","",33,[[["m64x2",6]]]],[11,"from","","",33,[[["m128x2",6]]]],[11,"from","","",32,[[]]],[11,"from","","",32,[[["i8x2",6]]]],[11,"from","","",32,[[["u8x2",6]]]],[11,"from","","",32,[[["i16x2",6]]]],[11,"from","","",32,[[["u16x2",6]]]],[11,"from","","",34,[[]]],[11,"from","","",35,[[]]],[11,"from","","",36,[[]]],[11,"from","","",36,[[["m16x16",6]]]],[11,"from","","",37,[[]]],[11,"from","","",37,[[["i8x8",6]]]],[11,"from","","",37,[[["u8x8",6]]]],[11,"from","","",38,[[]]],[11,"from","","",38,[[["u8x8",6]]]],[11,"from","","",39,[[]]],[11,"from","","",39,[[["m8x8",6]]]],[11,"from","","",39,[[["m32x8",6]]]],[11,"from","","",40,[[]]],[11,"from","","",40,[[["i8x4",6]]]],[11,"from","","",40,[[["u8x4",6]]]],[11,"from","","",40,[[["i16x4",6]]]],[11,"from","","",40,[[["u16x4",6]]]],[11,"from","","",41,[[]]],[11,"from","","",41,[[["u8x4",6]]]],[11,"from","","",41,[[["u16x4",6]]]],[11,"from","","",42,[[]]],[11,"from","","",42,[[["i8x4",6]]]],[11,"from","","",42,[[["u8x4",6]]]],[11,"from","","",42,[[["i16x4",6]]]],[11,"from","","",42,[[["u16x4",6]]]],[11,"from","","",43,[[]]],[11,"from","","",43,[[["m8x4",6]]]],[11,"from","","",43,[[["m16x4",6]]]],[11,"from","","",43,[[["m64x4",6]]]],[11,"from","","",44,[[]]],[11,"from","","",44,[[["i8x2",6]]]],[11,"from","","",44,[[["u8x2",6]]]],[11,"from","","",44,[[["i16x2",6]]]],[11,"from","","",44,[[["u16x2",6]]]],[11,"from","","",44,[[["i32x2",6]]]],[11,"from","","",44,[[["u32x2",6]]]],[11,"from","","",45,[[]]],[11,"from","","",45,[[["u8x2",6]]]],[11,"from","","",45,[[["u16x2",6]]]],[11,"from","","",45,[[["u32x2",6]]]],[11,"from","","",46,[[]]],[11,"from","","",46,[[["i8x2",6]]]],[11,"from","","",46,[[["u8x2",6]]]],[11,"from","","",46,[[["i16x2",6]]]],[11,"from","","",46,[[["u16x2",6]]]],[11,"from","","",46,[[["i32x2",6]]]],[11,"from","","",46,[[["u32x2",6]]]],[11,"from","","",46,[[["f32x2",6]]]],[11,"from","","",47,[[]]],[11,"from","","",47,[[["m8x2",6]]]],[11,"from","","",47,[[["m16x2",6]]]],[11,"from","","",47,[[["m32x2",6]]]],[11,"from","","",47,[[["m128x2",6]]]],[11,"from","","",51,[[]]],[11,"from","","",52,[[]]],[11,"from","","",53,[[]]],[11,"from","","",54,[[]]],[11,"from","","",55,[[]]],[11,"from","","",56,[[]]],[11,"from","","",57,[[]]],[11,"from","","",57,[[["i8x16",6]]]],[11,"from","","",57,[[["u8x16",6]]]],[11,"from","","",58,[[]]],[11,"from","","",58,[[["u8x16",6]]]],[11,"from","","",59,[[]]],[11,"from","","",59,[[["m8x16",6]]]],[11,"from","","",60,[[]]],[11,"from","","",60,[[["i8x8",6]]]],[11,"from","","",60,[[["u8x8",6]]]],[11,"from","","",60,[[["i16x8",6]]]],[11,"from","","",60,[[["u16x8",6]]]],[11,"from","","",61,[[]]],[11,"from","","",61,[[["u8x8",6]]]],[11,"from","","",61,[[["u16x8",6]]]],[11,"from","","",62,[[]]],[11,"from","","",62,[[["i8x8",6]]]],[11,"from","","",62,[[["u8x8",6]]]],[11,"from","","",62,[[["i16x8",6]]]],[11,"from","","",62,[[["u16x8",6]]]],[11,"from","","",63,[[]]],[11,"from","","",63,[[["m8x8",6]]]],[11,"from","","",63,[[["m16x8",6]]]],[11,"from","","",64,[[]]],[11,"from","","",64,[[["i8x4",6]]]],[11,"from","","",64,[[["u8x4",6]]]],[11,"from","","",64,[[["i16x4",6]]]],[11,"fr
om","","",64,[[["u16x4",6]]]],[11,"from","","",64,[[["i32x4",6]]]],[11,"from","","",64,[[["u32x4",6]]]],[11,"from","","",65,[[]]],[11,"from","","",65,[[["u8x4",6]]]],[11,"from","","",65,[[["u16x4",6]]]],[11,"from","","",65,[[["u32x4",6]]]],[11,"from","","",66,[[]]],[11,"from","","",66,[[["i8x4",6]]]],[11,"from","","",66,[[["u8x4",6]]]],[11,"from","","",66,[[["i16x4",6]]]],[11,"from","","",66,[[["u16x4",6]]]],[11,"from","","",66,[[["i32x4",6]]]],[11,"from","","",66,[[["u32x4",6]]]],[11,"from","","",66,[[["f32x4",6]]]],[11,"from","","",67,[[]]],[11,"from","","",67,[[["m8x4",6]]]],[11,"from","","",67,[[["m16x4",6]]]],[11,"from","","",67,[[["m32x4",6]]]],[11,"from","","",68,[[]]],[11,"from","","",68,[[["i8x2",6]]]],[11,"from","","",68,[[["u8x2",6]]]],[11,"from","","",68,[[["i16x2",6]]]],[11,"from","","",68,[[["u16x2",6]]]],[11,"from","","",68,[[["i32x2",6]]]],[11,"from","","",68,[[["u32x2",6]]]],[11,"from","","",68,[[["i64x2",6]]]],[11,"from","","",68,[[["u64x2",6]]]],[11,"from","","",69,[[]]],[11,"from","","",69,[[["u8x2",6]]]],[11,"from","","",69,[[["u16x2",6]]]],[11,"from","","",69,[[["u32x2",6]]]],[11,"from","","",69,[[["u64x2",6]]]],[11,"from","","",70,[[]]],[11,"from","","",70,[[["m8x2",6]]]],[11,"from","","",70,[[["m16x2",6]]]],[11,"from","","",70,[[["m32x2",6]]]],[11,"from","","",70,[[["m64x2",6]]]],[11,"from","","",74,[[]]],[11,"from","","",75,[[]]],[11,"from","","",76,[[]]],[11,"from","","",77,[[]]],[11,"from","","",77,[[["i8x32",6]]]],[11,"from","","",77,[[["u8x32",6]]]],[11,"from","","",78,[[]]],[11,"from","","",78,[[["u8x32",6]]]],[11,"from","","",79,[[]]],[11,"from","","",79,[[["m8x32",6]]]],[11,"from","","",80,[[]]],[11,"from","","",80,[[["i8x16",6]]]],[11,"from","","",80,[[["u8x16",6]]]],[11,"from","","",80,[[["i16x16",6]]]],[11,"from","","",80,[[["u16x16",6]]]],[11,"from","","",81,[[]]],[11,"from","","",81,[[["u8x16",6]]]],[11,"from","","",81,[[["u16x16",6]]]],[11,"from","","",82,[[]]],[11,"from","","",82,[[["i8x16",6]]]],[11,"from","","",82,[[["u8x16",6]]]],[11,"from","","",82,[[["i16x16",6]]]],[11,"from","","",82,[[["u16x16",6]]]],[11,"from","","",83,[[]]],[11,"from","","",83,[[["m8x16",6]]]],[11,"from","","",83,[[["m16x16",6]]]],[11,"from","","",84,[[]]],[11,"from","","",84,[[["i8x8",6]]]],[11,"from","","",84,[[["u8x8",6]]]],[11,"from","","",84,[[["i16x8",6]]]],[11,"from","","",84,[[["u16x8",6]]]],[11,"from","","",84,[[["i32x8",6]]]],[11,"from","","",84,[[["u32x8",6]]]],[11,"from","","",85,[[]]],[11,"from","","",85,[[["u8x8",6]]]],[11,"from","","",85,[[["u16x8",6]]]],[11,"from","","",85,[[["u32x8",6]]]],[11,"from","","",86,[[]]],[11,"from","","",86,[[["i8x8",6]]]],[11,"from","","",86,[[["u8x8",6]]]],[11,"from","","",86,[[["i16x8",6]]]],[11,"from","","",86,[[["u16x8",6]]]],[11,"from","","",86,[[["i32x8",6]]]],[11,"from","","",86,[[["u32x8",6]]]],[11,"from","","",86,[[["f32x8",6]]]],[11,"from","","",87,[[]]],[11,"from","","",87,[[["m8x8",6]]]],[11,"from","","",87,[[["m16x8",6]]]],[11,"from","","",87,[[["m32x8",6]]]],[11,"from","","",88,[[]]],[11,"from","","",88,[[["i8x4",6]]]],[11,"from","","",88,[[["u8x4",6]]]],[11,"from","","",88,[[["i16x4",6]]]],[11,"from","","",88,[[["u16x4",6]]]],[11,"from","","",88,[[["i32x4",6]]]],[11,"from","","",88,[[["u32x4",6]]]],[11,"from","","",88,[[["i64x4",6]]]],[11,"from","","",88,[[["u64x4",6]]]],[11,"from","","",89,[[]]],[11,"from","","",89,[[["u8x4",6]]]],[11,"from","","",89,[[["u16x4",6]]]],[11,"from","","",89,[[["u32x4",6]]]],[11,"from","","",89,[[["u64x4",6]]]],[11,"from","","",90,[[]]],[11,"from","","",90,[[["m8x4",6]]]],[11,"from","",""
,90,[[["m16x4",6]]]],[11,"from","","",90,[[["m32x4",6]]]],[11,"from","","",90,[[["m64x4",6]]]],[11,"from","","",48,[[]]],[11,"from","","",49,[[]]],[11,"from","","",50,[[]]],[11,"from","","",71,[[]]],[11,"from","","",72,[[]]],[11,"from","","",73,[[]]],[11,"from","","",91,[[]]],[11,"from","","",92,[[]]],[11,"from","","",93,[[]]],[11,"from","","",94,[[]]],[11,"from","","",95,[[]]],[11,"from","","",96,[[]]],[11,"from","","",97,[[]]],[11,"from","","",98,[[]]],[11,"from","","",99,[[]]],[11,"fmt","","",14,[[["formatter",3]],["result",6]]],[11,"fmt","","",0,[[["formatter",3]],[["result",4],["error",3]]]],[11,"fmt","","",1,[[["formatter",3]],[["result",4],["error",3]]]],[11,"fmt","","",2,[[["formatter",3]],[["result",4],["error",3]]]],[11,"fmt","","",3,[[["formatter",3]],[["result",4],["error",3]]]],[11,"fmt","","",4,[[["formatter",3]],[["result",4],["error",3]]]],[11,"fmt","","",5,[[["formatter",3]],[["result",4],["error",3]]]],[11,"fmt","","",15,[[["formatter",3]],["result",6]]],[11,"fmt","","",16,[[["formatter",3]],["result",6]]],[11,"fmt","","",17,[[["formatter",3]],["result",6]]],[11,"fmt","","",18,[[["formatter",3]],["result",6]]],[11,"fmt","","",19,[[["formatter",3]],["result",6]]],[11,"fmt","","",20,[[["formatter",3]],["result",6]]],[11,"fmt","","",21,[[["formatter",3]],["result",6]]],[11,"fmt","","",22,[[["formatter",3]],["result",6]]],[11,"fmt","","",23,[[["formatter",3]],["result",6]]],[11,"fmt","","",24,[[["formatter",3]],["result",6]]],[11,"fmt","","",25,[[["formatter",3]],["result",6]]],[11,"fmt","","",26,[[["formatter",3]],["result",6]]],[11,"fmt","","",27,[[["formatter",3]],["result",6]]],[11,"fmt","","",28,[[["formatter",3]],["result",6]]],[11,"fmt","","",29,[[["formatter",3]],["result",6]]],[11,"fmt","","",30,[[["formatter",3]],["result",6]]],[11,"fmt","","",31,[[["formatter",3]],["result",6]]],[11,"fmt","","",33,[[["formatter",3]],["result",6]]],[11,"fmt","","",32,[[["formatter",3]],["result",6]]],[11,"fmt","","",34,[[["formatter",3]],["result",6]]],[11,"fmt","","",35,[[["formatter",3]],["result",6]]],[11,"fmt","","",36,[[["formatter",3]],["result",6]]],[11,"fmt","","",37,[[["formatter",3]],["result",6]]],[11,"fmt","","",38,[[["formatter",3]],["result",6]]],[11,"fmt","","",39,[[["formatter",3]],["result",6]]],[11,"fmt","","",40,[[["formatter",3]],["result",6]]],[11,"fmt","","",41,[[["formatter",3]],["result",6]]],[11,"fmt","","",42,[[["formatter",3]],["result",6]]],[11,"fmt","","",43,[[["formatter",3]],["result",6]]],[11,"fmt","","",44,[[["formatter",3]],["result",6]]],[11,"fmt","","",45,[[["formatter",3]],["result",6]]],[11,"fmt","","",46,[[["formatter",3]],["result",6]]],[11,"fmt","","",47,[[["formatter",3]],["result",6]]],[11,"fmt","","",51,[[["formatter",3]],["result",6]]],[11,"fmt","","",52,[[["formatter",3]],["result",6]]],[11,"fmt","","",53,[[["formatter",3]],["result",6]]],[11,"fmt","","",54,[[["formatter",3]],["result",6]]],[11,"fmt","","",55,[[["formatter",3]],["result",6]]],[11,"fmt","","",56,[[["formatter",3]],["result",6]]],[11,"fmt","","",57,[[["formatter",3]],["result",6]]],[11,"fmt","","",58,[[["formatter",3]],["result",6]]],[11,"fmt","","",59,[[["formatter",3]],["result",6]]],[11,"fmt","","",60,[[["formatter",3]],["result",6]]],[11,"fmt","","",61,[[["formatter",3]],["result",6]]],[11,"fmt","","",62,[[["formatter",3]],["result",6]]],[11,"fmt","","",63,[[["formatter",3]],["result",6]]],[11,"fmt","","",64,[[["formatter",3]],["result",6]]],[11,"fmt","","",65,[[["formatter",3]],["result",6]]],[11,"fmt","","",66,[[["formatter",3]],["result",6]]],[11,"fmt","","",67,[[["f
ormatter",3]],["result",6]]],[11,"fmt","","",68,[[["formatter",3]],["result",6]]],[11,"fmt","","",69,[[["formatter",3]],["result",6]]],[11,"fmt","","",70,[[["formatter",3]],["result",6]]],[11,"fmt","","",74,[[["formatter",3]],["result",6]]],[11,"fmt","","",75,[[["formatter",3]],["result",6]]],[11,"fmt","","",76,[[["formatter",3]],["result",6]]],[11,"fmt","","",77,[[["formatter",3]],["result",6]]],[11,"fmt","","",78,[[["formatter",3]],["result",6]]],[11,"fmt","","",79,[[["formatter",3]],["result",6]]],[11,"fmt","","",80,[[["formatter",3]],["result",6]]],[11,"fmt","","",81,[[["formatter",3]],["result",6]]],[11,"fmt","","",82,[[["formatter",3]],["result",6]]],[11,"fmt","","",83,[[["formatter",3]],["result",6]]],[11,"fmt","","",84,[[["formatter",3]],["result",6]]],[11,"fmt","","",85,[[["formatter",3]],["result",6]]],[11,"fmt","","",86,[[["formatter",3]],["result",6]]],[11,"fmt","","",87,[[["formatter",3]],["result",6]]],[11,"fmt","","",88,[[["formatter",3]],["result",6]]],[11,"fmt","","",89,[[["formatter",3]],["result",6]]],[11,"fmt","","",90,[[["formatter",3]],["result",6]]],[11,"fmt","","",48,[[["formatter",3]],["result",6]]],[11,"fmt","","",49,[[["formatter",3]],["result",6]]],[11,"fmt","","",50,[[["formatter",3]],["result",6]]],[11,"fmt","","",71,[[["formatter",3]],["result",6]]],[11,"fmt","","",72,[[["formatter",3]],["result",6]]],[11,"fmt","","",73,[[["formatter",3]],["result",6]]],[11,"fmt","","",91,[[["formatter",3]],["result",6]]],[11,"fmt","","",92,[[["formatter",3]],["result",6]]],[11,"fmt","","",93,[[["formatter",3]],["result",6]]],[11,"fmt","","",94,[[["formatter",3]],["result",6]]],[11,"fmt","","",95,[[["formatter",3]],["result",6]]],[11,"fmt","","",96,[[["formatter",3]],["result",6]]],[11,"fmt","","",97,[[["formatter",3]],["result",6]]],[11,"fmt","","",98,[[["formatter",3]],["result",6]]],[11,"fmt","","",99,[[["formatter",3]],["result",6]]],[11,"div","","",15,[[]]],[11,"div","","",15,[[]]],[11,"div","","",16,[[]]],[11,"div","","",16,[[]]],[11,"div","","",18,[[]]],[11,"div","","",18,[[]]],[11,"div","","",19,[[]]],[11,"div","","",19,[[]]],[11,"div","","",21,[[]]],[11,"div","","",21,[[]]],[11,"div","","",22,[[]]],[11,"div","","",22,[[]]],[11,"div","","",24,[[]]],[11,"div","","",24,[[]]],[11,"div","","",25,[[]]],[11,"div","","",25,[[]]],[11,"div","","",27,[[]]],[11,"div","","",27,[[]]],[11,"div","","",28,[[]]],[11,"div","","",28,[[]]],[11,"div","","",30,[[]]],[11,"div","","",30,[[]]],[11,"div","","",31,[[]]],[11,"div","","",31,[[]]],[11,"div","","",32,[[]]],[11,"div","","",32,[[]]],[11,"div","","",34,[[]]],[11,"div","","",34,[[]]],[11,"div","","",35,[[]]],[11,"div","","",35,[[]]],[11,"div","","",37,[[]]],[11,"div","","",37,[[]]],[11,"div","","",38,[[]]],[11,"div","","",38,[[]]],[11,"div","","",40,[[]]],[11,"div","","",40,[[]]],[11,"div","","",41,[[]]],[11,"div","","",41,[[]]],[11,"div","","",42,[[]]],[11,"div","","",42,[[]]],[11,"div","","",44,[[]]],[11,"div","","",44,[[]]],[11,"div","","",45,[[]]],[11,"div","","",45,[[]]],[11,"div","","",46,[[]]],[11,"div","","",46,[[]]],[11,"div","","",51,[[]]],[11,"div","","",51,[[]]],[11,"div","","",52,[[]]],[11,"div","","",52,[[]]],[11,"div","","",54,[[]]],[11,"div","","",54,[[]]],[11,"div","","",55,[[]]],[11,"div","","",55,[[]]],[11,"div","","",57,[[]]],[11,"div","","",57,[[]]],[11,"div","","",58,[[]]],[11,"div","","",58,[[]]],[11,"div","","",60,[[]]],[11,"div","","",60,[[]]],[11,"div","","",61,[[]]],[11,"div","","",61,[[]]],[11,"div","","",62,[[]]],[11,"div","","",62,[[]]],[11,"div","","",64,[[]]],[11,"div","","",64,[[]]],[11,"div","","",65,[[
]]],[11,"div","","",65,[[]]],[11,"div","","",66,[[]]],[11,"div","","",66,[[]]],[11,"div","","",68,[[]]],[11,"div","","",68,[[]]],[11,"div","","",69,[[]]],[11,"div","","",69,[[]]],[11,"div","","",74,[[]]],[11,"div","","",74,[[]]],[11,"div","","",75,[[]]],[11,"div","","",75,[[]]],[11,"div","","",77,[[]]],[11,"div","","",77,[[]]],[11,"div","","",78,[[]]],[11,"div","","",78,[[]]],[11,"div","","",80,[[]]],[11,"div","","",80,[[]]],[11,"div","","",81,[[]]],[11,"div","","",81,[[]]],[11,"div","","",82,[[]]],[11,"div","","",82,[[]]],[11,"div","","",84,[[]]],[11,"div","","",84,[[]]],[11,"div","","",85,[[]]],[11,"div","","",85,[[]]],[11,"div","","",86,[[]]],[11,"div","","",86,[[]]],[11,"div","","",88,[[]]],[11,"div","","",88,[[]]],[11,"div","","",89,[[]]],[11,"div","","",89,[[]]],[11,"div","","",48,[[]]],[11,"div","","",48,[[]]],[11,"div","","",49,[[]]],[11,"div","","",49,[[]]],[11,"div","","",71,[[]]],[11,"div","","",71,[[]]],[11,"div","","",72,[[]]],[11,"div","","",72,[[]]],[11,"div","","",91,[[]]],[11,"div","","",91,[[]]],[11,"div","","",92,[[]]],[11,"div","","",92,[[]]],[11,"rem","","",15,[[]]],[11,"rem","","",15,[[]]],[11,"rem","","",16,[[]]],[11,"rem","","",16,[[]]],[11,"rem","","",18,[[]]],[11,"rem","","",18,[[]]],[11,"rem","","",19,[[]]],[11,"rem","","",19,[[]]],[11,"rem","","",21,[[]]],[11,"rem","","",21,[[]]],[11,"rem","","",22,[[]]],[11,"rem","","",22,[[]]],[11,"rem","","",24,[[]]],[11,"rem","","",24,[[]]],[11,"rem","","",25,[[]]],[11,"rem","","",25,[[]]],[11,"rem","","",27,[[]]],[11,"rem","","",27,[[]]],[11,"rem","","",28,[[]]],[11,"rem","","",28,[[]]],[11,"rem","","",30,[[]]],[11,"rem","","",30,[[]]],[11,"rem","","",31,[[]]],[11,"rem","","",31,[[]]],[11,"rem","","",32,[[]]],[11,"rem","","",32,[[]]],[11,"rem","","",34,[[]]],[11,"rem","","",34,[[]]],[11,"rem","","",35,[[]]],[11,"rem","","",35,[[]]],[11,"rem","","",37,[[]]],[11,"rem","","",37,[[]]],[11,"rem","","",38,[[]]],[11,"rem","","",38,[[]]],[11,"rem","","",40,[[]]],[11,"rem","","",40,[[]]],[11,"rem","","",41,[[]]],[11,"rem","","",41,[[]]],[11,"rem","","",42,[[]]],[11,"rem","","",42,[[]]],[11,"rem","","",44,[[]]],[11,"rem","","",44,[[]]],[11,"rem","","",45,[[]]],[11,"rem","","",45,[[]]],[11,"rem","","",46,[[]]],[11,"rem","","",46,[[]]],[11,"rem","","",51,[[]]],[11,"rem","","",51,[[]]],[11,"rem","","",52,[[]]],[11,"rem","","",52,[[]]],[11,"rem","","",54,[[]]],[11,"rem","","",54,[[]]],[11,"rem","","",55,[[]]],[11,"rem","","",55,[[]]],[11,"rem","","",57,[[]]],[11,"rem","","",57,[[]]],[11,"rem","","",58,[[]]],[11,"rem","","",58,[[]]],[11,"rem","","",60,[[]]],[11,"rem","","",60,[[]]],[11,"rem","","",61,[[]]],[11,"rem","","",61,[[]]],[11,"rem","","",62,[[]]],[11,"rem","","",62,[[]]],[11,"rem","","",64,[[]]],[11,"rem","","",64,[[]]],[11,"rem","","",65,[[]]],[11,"rem","","",65,[[]]],[11,"rem","","",66,[[]]],[11,"rem","","",66,[[]]],[11,"rem","","",68,[[]]],[11,"rem","","",68,[[]]],[11,"rem","","",69,[[]]],[11,"rem","","",69,[[]]],[11,"rem","","",74,[[]]],[11,"rem","","",74,[[]]],[11,"rem","","",75,[[]]],[11,"rem","","",75,[[]]],[11,"rem","","",77,[[]]],[11,"rem","","",77,[[]]],[11,"rem","","",78,[[]]],[11,"rem","","",78,[[]]],[11,"rem","","",80,[[]]],[11,"rem","","",80,[[]]],[11,"rem","","",81,[[]]],[11,"rem","","",81,[[]]],[11,"rem","","",82,[[]]],[11,"rem","","",82,[[]]],[11,"rem","","",84,[[]]],[11,"rem","","",84,[[]]],[11,"rem","","",85,[[]]],[11,"rem","","",85,[[]]],[11,"rem","","",86,[[]]],[11,"rem","","",86,[[]]],[11,"rem","","",88,[[]]],[11,"rem","","",88,[[]]],[11,"rem","","",89,[[]]],[11,"rem","","",89,[[]]],[11,"rem","","",48,[[]]],[
11,"rem","","",48,[[]]],[11,"rem","","",49,[[]]],[11,"rem","","",49,[[]]],[11,"rem","","",71,[[]]],[11,"rem","","",71,[[]]],[11,"rem","","",72,[[]]],[11,"rem","","",72,[[]]],[11,"rem","","",91,[[]]],[11,"rem","","",91,[[]]],[11,"rem","","",92,[[]]],[11,"rem","","",92,[[]]],[11,"sub","","",15,[[]]],[11,"sub","","",15,[[]]],[11,"sub","","",16,[[]]],[11,"sub","","",16,[[]]],[11,"sub","","",18,[[]]],[11,"sub","","",18,[[]]],[11,"sub","","",19,[[]]],[11,"sub","","",19,[[]]],[11,"sub","","",21,[[]]],[11,"sub","","",21,[[]]],[11,"sub","","",22,[[]]],[11,"sub","","",22,[[]]],[11,"sub","","",24,[[]]],[11,"sub","","",24,[[]]],[11,"sub","","",25,[[]]],[11,"sub","","",25,[[]]],[11,"sub","","",27,[[]]],[11,"sub","","",27,[[]]],[11,"sub","","",28,[[]]],[11,"sub","","",28,[[]]],[11,"sub","","",30,[[]]],[11,"sub","","",30,[[]]],[11,"sub","","",31,[[]]],[11,"sub","","",31,[[]]],[11,"sub","","",32,[[]]],[11,"sub","","",32,[[]]],[11,"sub","","",34,[[]]],[11,"sub","","",34,[[]]],[11,"sub","","",35,[[]]],[11,"sub","","",35,[[]]],[11,"sub","","",37,[[]]],[11,"sub","","",37,[[]]],[11,"sub","","",38,[[]]],[11,"sub","","",38,[[]]],[11,"sub","","",40,[[]]],[11,"sub","","",40,[[]]],[11,"sub","","",41,[[]]],[11,"sub","","",41,[[]]],[11,"sub","","",42,[[]]],[11,"sub","","",42,[[]]],[11,"sub","","",44,[[]]],[11,"sub","","",44,[[]]],[11,"sub","","",45,[[]]],[11,"sub","","",45,[[]]],[11,"sub","","",46,[[]]],[11,"sub","","",46,[[]]],[11,"sub","","",51,[[]]],[11,"sub","","",51,[[]]],[11,"sub","","",52,[[]]],[11,"sub","","",52,[[]]],[11,"sub","","",54,[[]]],[11,"sub","","",54,[[]]],[11,"sub","","",55,[[]]],[11,"sub","","",55,[[]]],[11,"sub","","",57,[[]]],[11,"sub","","",57,[[]]],[11,"sub","","",58,[[]]],[11,"sub","","",58,[[]]],[11,"sub","","",60,[[]]],[11,"sub","","",60,[[]]],[11,"sub","","",61,[[]]],[11,"sub","","",61,[[]]],[11,"sub","","",62,[[]]],[11,"sub","","",62,[[]]],[11,"sub","","",64,[[]]],[11,"sub","","",64,[[]]],[11,"sub","","",65,[[]]],[11,"sub","","",65,[[]]],[11,"sub","","",66,[[]]],[11,"sub","","",66,[[]]],[11,"sub","","",68,[[]]],[11,"sub","","",68,[[]]],[11,"sub","","",69,[[]]],[11,"sub","","",69,[[]]],[11,"sub","","",74,[[]]],[11,"sub","","",74,[[]]],[11,"sub","","",75,[[]]],[11,"sub","","",75,[[]]],[11,"sub","","",77,[[]]],[11,"sub","","",77,[[]]],[11,"sub","","",78,[[]]],[11,"sub","","",78,[[]]],[11,"sub","","",80,[[]]],[11,"sub","","",80,[[]]],[11,"sub","","",81,[[]]],[11,"sub","","",81,[[]]],[11,"sub","","",82,[[]]],[11,"sub","","",82,[[]]],[11,"sub","","",84,[[]]],[11,"sub","","",84,[[]]],[11,"sub","","",85,[[]]],[11,"sub","","",85,[[]]],[11,"sub","","",86,[[]]],[11,"sub","","",86,[[]]],[11,"sub","","",88,[[]]],[11,"sub","","",88,[[]]],[11,"sub","","",89,[[]]],[11,"sub","","",89,[[]]],[11,"sub","","",48,[[]]],[11,"sub","","",48,[[]]],[11,"sub","","",49,[[]]],[11,"sub","","",49,[[]]],[11,"sub","","",71,[[]]],[11,"sub","","",71,[[]]],[11,"sub","","",72,[[]]],[11,"sub","","",72,[[]]],[11,"sub","","",91,[[]]],[11,"sub","","",91,[[]]],[11,"sub","","",92,[[]]],[11,"sub","","",92,[[]]],[11,"eq","","",0,[[]]],[11,"ne","","",0,[[]]],[11,"eq","","",1,[[]]],[11,"ne","","",1,[[]]],[11,"eq","","",2,[[]]],[11,"ne","","",2,[[]]],[11,"eq","","",3,[[]]],[11,"ne","","",3,[[]]],[11,"eq","","",4,[[]]],[11,"ne","","",4,[[]]],[11,"eq","","",5,[[]]],[11,"ne","","",5,[[]]],[11,"eq","","",15,[[]]],[11,"ne","","",15,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",16,[[]]],[11,"ne","","",16,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",17,[[]]],[11,"ne","","",17,[[]]],[11,"eq","","",
14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",18,[[]]],[11,"ne","","",18,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",19,[[]]],[11,"ne","","",19,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",20,[[]]],[11,"ne","","",20,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",21,[[]]],[11,"ne","","",21,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",22,[[]]],[11,"ne","","",22,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",23,[[]]],[11,"ne","","",23,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",24,[[]]],[11,"ne","","",24,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",25,[[]]],[11,"ne","","",25,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",26,[[]]],[11,"ne","","",26,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",27,[[]]],[11,"ne","","",27,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",28,[[]]],[11,"ne","","",28,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",29,[[]]],[11,"ne","","",29,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",30,[[]]],[11,"ne","","",30,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",31,[[]]],[11,"ne","","",31,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",33,[[]]],[11,"ne","","",33,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",32,[[]]],[11,"ne","","",32,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",34,[[]]],[11,"ne","","",34,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",35,[[]]],[11,"ne","","",35,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",36,[[]]],[11,"ne","","",36,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",37,[[]]],[11,"ne","","",37,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",38,[[]]],[11,"ne","","",38,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",39,[[]]],[11,"ne","","",39,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",40,[[]]],[11,"ne","","",40,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",41,[[]]],[11,"ne","","",41,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",42,[[]]],[11,"ne","","",42,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",43,[[]]],[11,"ne","","",43,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",44,[[]]],[11,"ne","","",44,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",45,[[]]],[11,"ne","","",45,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",46,[[]]],[11,"ne","","",46,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",47,[[]]],[11,"ne","","",47,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",51,[[]]],[11,"ne","","",51,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",52,[[]]],[11,"ne","","",52,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",53,[[]]],[11,"ne","","",53,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",54,[[]]],[11,"ne","","",54,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",55,[[]]],[11,"ne","","",55,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",56,[[]]],[11,"ne","","",56,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",57,[[]]],[11,"ne","","",57,[[]]],[11,"eq","","",14,
[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",58,[[]]],[11,"ne","","",58,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",59,[[]]],[11,"ne","","",59,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",60,[[]]],[11,"ne","","",60,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",61,[[]]],[11,"ne","","",61,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",62,[[]]],[11,"ne","","",62,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",63,[[]]],[11,"ne","","",63,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",64,[[]]],[11,"ne","","",64,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",65,[[]]],[11,"ne","","",65,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",66,[[]]],[11,"ne","","",66,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",67,[[]]],[11,"ne","","",67,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",68,[[]]],[11,"ne","","",68,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",69,[[]]],[11,"ne","","",69,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",70,[[]]],[11,"ne","","",70,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",74,[[]]],[11,"ne","","",74,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",75,[[]]],[11,"ne","","",75,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",76,[[]]],[11,"ne","","",76,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",77,[[]]],[11,"ne","","",77,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",78,[[]]],[11,"ne","","",78,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",79,[[]]],[11,"ne","","",79,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",80,[[]]],[11,"ne","","",80,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",81,[[]]],[11,"ne","","",81,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",82,[[]]],[11,"ne","","",82,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",83,[[]]],[11,"ne","","",83,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",84,[[]]],[11,"ne","","",84,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",85,[[]]],[11,"ne","","",85,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",86,[[]]],[11,"ne","","",86,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",87,[[]]],[11,"ne","","",87,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",88,[[]]],[11,"ne","","",88,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",89,[[]]],[11,"ne","","",89,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",90,[[]]],[11,"ne","","",90,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",48,[[]]],[11,"ne","","",48,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",49,[[]]],[11,"ne","","",49,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",50,[[]]],[11,"ne","","",50,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",71,[[]]],[11,"ne","","",71,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",72,[[]]],[11,"ne","","",72,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",73,[[]]],[11,"ne","","",73,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",91,[[]]],[11,"ne","","",91,[[]]],[11,"eq","","",14,[[]
]],[11,"ne","","",14,[[]]],[11,"eq","","",92,[[]]],[11,"ne","","",92,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",93,[[]]],[11,"ne","","",93,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",94,[[]]],[11,"ne","","",94,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",95,[[]]],[11,"ne","","",95,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",96,[[]]],[11,"ne","","",96,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",97,[[]]],[11,"ne","","",97,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",98,[[]]],[11,"ne","","",98,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"eq","","",99,[[]]],[11,"ne","","",99,[[]]],[11,"eq","","",14,[[]]],[11,"ne","","",14,[[]]],[11,"cmp","","",0,[[],["ordering",4]]],[11,"cmp","","",1,[[],["ordering",4]]],[11,"cmp","","",2,[[],["ordering",4]]],[11,"cmp","","",3,[[],["ordering",4]]],[11,"cmp","","",4,[[],["ordering",4]]],[11,"cmp","","",5,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"c
mp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"cmp","","",14,[[],["ordering",4]]],[11,"partial_cmp","","",0,[[],[["option",4],["ordering",4]]]],[11,"lt","","",0,[[]]],[11,"gt","","",0,[[]]],[11,"le","","",0,[[]]],[11,"ge","","",0,[[]]],[11,"partial_cmp","","",1,[[],[["option",4],["ordering",4]]]],[11,"lt","","",1,[[]]],[11,"gt","","",1,[[]]],[11,"le","","",1,[[]]],[11,"ge","","",1,[[]]],[11,"partial_cmp","","",2,[[],[["option",4],["ordering",4]]]],[11,"lt","","",2,[[]]],[11,"gt","","",2,[[]]],[11,"le","","",2,[[]]],[11,"ge","","",2,[[]]],[11,"partial_cmp","","",3,[[],[["option",4],["ordering",4]]]],[11,"lt","","",3,[[]]],[11,"gt","","",3,[[]]],[11,"le","","",3,[[]]],[11,"ge","","",3,[[]]],[11,"partial_cmp","","",4,[[],[["option",4],["ordering",4]]]],[11,"lt","","",4,[[]]],[11,"gt","","",4,[[]]],[11,"le","","",4,[[]]],[11,"ge","","",4,[[]]],[11,"partial_cmp","","",5,[[],[["option",4],["ordering",4]]]],[11,"lt","","",5,[[]]],[11,"gt","","",5,[[]]],[11,"le","","",5,[[]]],[11,"ge","","",5,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["orde
ring",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]
]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"p
artial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"partial_cmp","","",14,[[],[["option",4],["ordering",4]]]],[11,"lt","","",14,[[]]],[11,"le","","",14,[[]]],[11,"ge","","",14,[[]]],[11,"gt","","",14,[[]]],[11,"add","","",15,[[]]],[11,"add","","",15,[[]]],[11,"add","","",16,[[]]],[11,"add","","",16,[[]]],[11,"add","","",18,[[]]],[11,"add","","",18,[[]]],[11,"add","","",19,[[]]],[11,"add","","",19,[[]]],[11,"add","","",21,[[]]],[11,"add","","",21,[[]]],[11,"add","","",22,[[]]],[11,"add","","",22,[[]]],[11,"add","","",24,[[]]],[11,"add","","",24,[[]]],[11,"add","","",25,[[]]],[11,"add","","",25,[[]]],[11,"add","","",27,[[]]],[11,"add","","",27,[[]]],[11,"add","","",28,[[]]],[11,"add","","",28,[[]]],[11,"add","","",30,[[]]],[11,"add","","",30,[[]]],[11,"add","","",31,[[]]],[11,"add","","",31,[[]]],[11,"add","","",32,[[]]],[11,"add","","",32,[[]]],[11,"add","","",34,[[]]],[11,"add","","",34,[[]]],[11,"add","","",35,[[]]],[11,"add","","",35,[[]]],[11,"add","","",37,[[]]],[11,"add","","",37,[[]]],[11,"add","","",38,[[]]],[11,"add","","",38,[[]]],[11,"add","","",40,[[]]],[11,"add","","",40,[[]]],[11,"add","","",41,[[]]],[11,"add","","",41,[[]]],[11,"add","","",42,[[]]],[11,"add","","",42,[[]]],[11,"add","","",44,[[]]],[11,"add","","",44,[[]]],[11,"add","","",45,[[]]],[11,"add","","",45,[[]]],[11,"add","","",46,[[]]],[11,"add","","",46,[[]]],[11,"add","","",51,[[]]],[11,"add","","",51,[[]]],[11,"add","","",52,[[]]],[11,"add","","",52,[[]]],[11,"add","","",54,[[]]],[11,"add","","",54,[[]]],[11,"add","","",55,[[]]],[11,"add","","",55,[[]]],[11,"add","","",57,[[]]],[11,"add","","",57,[[]]],[11,"add","","",58,[[]]],[11,"add","","",58,[[]]],[11,"add","","",60,[[]]],[11,"add","","",60,
[[]]],[11,"add","","",61,[[]]],[11,"add","","",61,[[]]],[11,"add","","",62,[[]]],[11,"add","","",62,[[]]],[11,"add","","",64,[[]]],[11,"add","","",64,[[]]],[11,"add","","",65,[[]]],[11,"add","","",65,[[]]],[11,"add","","",66,[[]]],[11,"add","","",66,[[]]],[11,"add","","",68,[[]]],[11,"add","","",68,[[]]],[11,"add","","",69,[[]]],[11,"add","","",69,[[]]],[11,"add","","",74,[[]]],[11,"add","","",74,[[]]],[11,"add","","",75,[[]]],[11,"add","","",75,[[]]],[11,"add","","",77,[[]]],[11,"add","","",77,[[]]],[11,"add","","",78,[[]]],[11,"add","","",78,[[]]],[11,"add","","",80,[[]]],[11,"add","","",80,[[]]],[11,"add","","",81,[[]]],[11,"add","","",81,[[]]],[11,"add","","",82,[[]]],[11,"add","","",82,[[]]],[11,"add","","",84,[[]]],[11,"add","","",84,[[]]],[11,"add","","",85,[[]]],[11,"add","","",85,[[]]],[11,"add","","",86,[[]]],[11,"add","","",86,[[]]],[11,"add","","",88,[[]]],[11,"add","","",88,[[]]],[11,"add","","",89,[[]]],[11,"add","","",89,[[]]],[11,"add","","",48,[[]]],[11,"add","","",48,[[]]],[11,"add","","",49,[[]]],[11,"add","","",49,[[]]],[11,"add","","",71,[[]]],[11,"add","","",71,[[]]],[11,"add","","",72,[[]]],[11,"add","","",72,[[]]],[11,"add","","",91,[[]]],[11,"add","","",91,[[]]],[11,"add","","",92,[[]]],[11,"add","","",92,[[]]],[11,"mul","","",15,[[]]],[11,"mul","","",15,[[]]],[11,"mul","","",16,[[]]],[11,"mul","","",16,[[]]],[11,"mul","","",18,[[]]],[11,"mul","","",18,[[]]],[11,"mul","","",19,[[]]],[11,"mul","","",19,[[]]],[11,"mul","","",21,[[]]],[11,"mul","","",21,[[]]],[11,"mul","","",22,[[]]],[11,"mul","","",22,[[]]],[11,"mul","","",24,[[]]],[11,"mul","","",24,[[]]],[11,"mul","","",25,[[]]],[11,"mul","","",25,[[]]],[11,"mul","","",27,[[]]],[11,"mul","","",27,[[]]],[11,"mul","","",28,[[]]],[11,"mul","","",28,[[]]],[11,"mul","","",30,[[]]],[11,"mul","","",30,[[]]],[11,"mul","","",31,[[]]],[11,"mul","","",31,[[]]],[11,"mul","","",32,[[]]],[11,"mul","","",32,[[]]],[11,"mul","","",34,[[]]],[11,"mul","","",34,[[]]],[11,"mul","","",35,[[]]],[11,"mul","","",35,[[]]],[11,"mul","","",37,[[]]],[11,"mul","","",37,[[]]],[11,"mul","","",38,[[]]],[11,"mul","","",38,[[]]],[11,"mul","","",40,[[]]],[11,"mul","","",40,[[]]],[11,"mul","","",41,[[]]],[11,"mul","","",41,[[]]],[11,"mul","","",42,[[]]],[11,"mul","","",42,[[]]],[11,"mul","","",44,[[]]],[11,"mul","","",44,[[]]],[11,"mul","","",45,[[]]],[11,"mul","","",45,[[]]],[11,"mul","","",46,[[]]],[11,"mul","","",46,[[]]],[11,"mul","","",51,[[]]],[11,"mul","","",51,[[]]],[11,"mul","","",52,[[]]],[11,"mul","","",52,[[]]],[11,"mul","","",54,[[]]],[11,"mul","","",54,[[]]],[11,"mul","","",55,[[]]],[11,"mul","","",55,[[]]],[11,"mul","","",57,[[]]],[11,"mul","","",57,[[]]],[11,"mul","","",58,[[]]],[11,"mul","","",58,[[]]],[11,"mul","","",60,[[]]],[11,"mul","","",60,[[]]],[11,"mul","","",61,[[]]],[11,"mul","","",61,[[]]],[11,"mul","","",62,[[]]],[11,"mul","","",62,[[]]],[11,"mul","","",64,[[]]],[11,"mul","","",64,[[]]],[11,"mul","","",65,[[]]],[11,"mul","","",65,[[]]],[11,"mul","","",66,[[]]],[11,"mul","","",66,[[]]],[11,"mul","","",68,[[]]],[11,"mul","","",68,[[]]],[11,"mul","","",69,[[]]],[11,"mul","","",69,[[]]],[11,"mul","","",74,[[]]],[11,"mul","","",74,[[]]],[11,"mul","","",75,[[]]],[11,"mul","","",75,[[]]],[11,"mul","","",77,[[]]],[11,"mul","","",77,[[]]],[11,"mul","","",78,[[]]],[11,"mul","","",78,[[]]],[11,"mul","","",80,[[]]],[11,"mul","","",80,[[]]],[11,"mul","","",81,[[]]],[11,"mul","","",81,[[]]],[11,"mul","","",82,[[]]],[11,"mul","","",82,[[]]],[11,"mul","","",84,[[]]],[11,"mul","","",84,[[]]],[11,"mul","","",85,[[]]],[11,"mul","","",85,[[]]]
,[11,"mul","","",86,[[]]],[11,"mul","","",86,[[]]],[11,"mul","","",88,[[]]],[11,"mul","","",88,[[]]],[11,"mul","","",89,[[]]],[11,"mul","","",89,[[]]],[11,"mul","","",48,[[]]],[11,"mul","","",48,[[]]],[11,"mul","","",49,[[]]],[11,"mul","","",49,[[]]],[11,"mul","","",71,[[]]],[11,"mul","","",71,[[]]],[11,"mul","","",72,[[]]],[11,"mul","","",72,[[]]],[11,"mul","","",91,[[]]],[11,"mul","","",91,[[]]],[11,"mul","","",92,[[]]],[11,"mul","","",92,[[]]],[11,"neg","","",15,[[]]],[11,"neg","","",18,[[]]],[11,"neg","","",21,[[]]],[11,"neg","","",24,[[]]],[11,"neg","","",27,[[]]],[11,"neg","","",30,[[]]],[11,"neg","","",32,[[]]],[11,"neg","","",34,[[]]],[11,"neg","","",37,[[]]],[11,"neg","","",40,[[]]],[11,"neg","","",42,[[]]],[11,"neg","","",44,[[]]],[11,"neg","","",46,[[]]],[11,"neg","","",51,[[]]],[11,"neg","","",54,[[]]],[11,"neg","","",57,[[]]],[11,"neg","","",60,[[]]],[11,"neg","","",62,[[]]],[11,"neg","","",64,[[]]],[11,"neg","","",66,[[]]],[11,"neg","","",68,[[]]],[11,"neg","","",74,[[]]],[11,"neg","","",77,[[]]],[11,"neg","","",80,[[]]],[11,"neg","","",82,[[]]],[11,"neg","","",84,[[]]],[11,"neg","","",86,[[]]],[11,"neg","","",88,[[]]],[11,"neg","","",48,[[]]],[11,"neg","","",71,[[]]],[11,"neg","","",91,[[]]],[11,"add_assign","","",15,[[]]],[11,"add_assign","","",15,[[]]],[11,"add_assign","","",16,[[]]],[11,"add_assign","","",16,[[]]],[11,"add_assign","","",18,[[]]],[11,"add_assign","","",18,[[]]],[11,"add_assign","","",19,[[]]],[11,"add_assign","","",19,[[]]],[11,"add_assign","","",21,[[]]],[11,"add_assign","","",21,[[]]],[11,"add_assign","","",22,[[]]],[11,"add_assign","","",22,[[]]],[11,"add_assign","","",24,[[]]],[11,"add_assign","","",24,[[]]],[11,"add_assign","","",25,[[]]],[11,"add_assign","","",25,[[]]],[11,"add_assign","","",27,[[]]],[11,"add_assign","","",27,[[]]],[11,"add_assign","","",28,[[]]],[11,"add_assign","","",28,[[]]],[11,"add_assign","","",30,[[]]],[11,"add_assign","","",30,[[]]],[11,"add_assign","","",31,[[]]],[11,"add_assign","","",31,[[]]],[11,"add_assign","","",32,[[]]],[11,"add_assign","","",32,[[]]],[11,"add_assign","","",34,[[]]],[11,"add_assign","","",34,[[]]],[11,"add_assign","","",35,[[]]],[11,"add_assign","","",35,[[]]],[11,"add_assign","","",37,[[]]],[11,"add_assign","","",37,[[]]],[11,"add_assign","","",38,[[]]],[11,"add_assign","","",38,[[]]],[11,"add_assign","","",40,[[]]],[11,"add_assign","","",40,[[]]],[11,"add_assign","","",41,[[]]],[11,"add_assign","","",41,[[]]],[11,"add_assign","","",42,[[]]],[11,"add_assign","","",42,[[]]],[11,"add_assign","","",44,[[]]],[11,"add_assign","","",44,[[]]],[11,"add_assign","","",45,[[]]],[11,"add_assign","","",45,[[]]],[11,"add_assign","","",46,[[]]],[11,"add_assign","","",46,[[]]],[11,"add_assign","","",51,[[]]],[11,"add_assign","","",51,[[]]],[11,"add_assign","","",52,[[]]],[11,"add_assign","","",52,[[]]],[11,"add_assign","","",54,[[]]],[11,"add_assign","","",54,[[]]],[11,"add_assign","","",55,[[]]],[11,"add_assign","","",55,[[]]],[11,"add_assign","","",57,[[]]],[11,"add_assign","","",57,[[]]],[11,"add_assign","","",58,[[]]],[11,"add_assign","","",58,[[]]],[11,"add_assign","","",60,[[]]],[11,"add_assign","","",60,[[]]],[11,"add_assign","","",61,[[]]],[11,"add_assign","","",61,[[]]],[11,"add_assign","","",62,[[]]],[11,"add_assign","","",62,[[]]],[11,"add_assign","","",64,[[]]],[11,"add_assign","","",64,[[]]],[11,"add_assign","","",65,[[]]],[11,"add_assign","","",65,[[]]],[11,"add_assign","","",66,[[]]],[11,"add_assign","","",66,[[]]],[11,"add_assign","","",68,[[]]],[11,"add_assign","","",68,[[]]],[11,"add_assign","","",69
,[[]]],[11,"add_assign","","",69,[[]]],[11,"add_assign","","",74,[[]]],[11,"add_assign","","",74,[[]]],[11,"add_assign","","",75,[[]]],[11,"add_assign","","",75,[[]]],[11,"add_assign","","",77,[[]]],[11,"add_assign","","",77,[[]]],[11,"add_assign","","",78,[[]]],[11,"add_assign","","",78,[[]]],[11,"add_assign","","",80,[[]]],[11,"add_assign","","",80,[[]]],[11,"add_assign","","",81,[[]]],[11,"add_assign","","",81,[[]]],[11,"add_assign","","",82,[[]]],[11,"add_assign","","",82,[[]]],[11,"add_assign","","",84,[[]]],[11,"add_assign","","",84,[[]]],[11,"add_assign","","",85,[[]]],[11,"add_assign","","",85,[[]]],[11,"add_assign","","",86,[[]]],[11,"add_assign","","",86,[[]]],[11,"add_assign","","",88,[[]]],[11,"add_assign","","",88,[[]]],[11,"add_assign","","",89,[[]]],[11,"add_assign","","",89,[[]]],[11,"add_assign","","",48,[[]]],[11,"add_assign","","",48,[[]]],[11,"add_assign","","",49,[[]]],[11,"add_assign","","",49,[[]]],[11,"add_assign","","",71,[[]]],[11,"add_assign","","",71,[[]]],[11,"add_assign","","",72,[[]]],[11,"add_assign","","",72,[[]]],[11,"add_assign","","",91,[[]]],[11,"add_assign","","",91,[[]]],[11,"add_assign","","",92,[[]]],[11,"add_assign","","",92,[[]]],[11,"sub_assign","","",15,[[]]],[11,"sub_assign","","",15,[[]]],[11,"sub_assign","","",16,[[]]],[11,"sub_assign","","",16,[[]]],[11,"sub_assign","","",18,[[]]],[11,"sub_assign","","",18,[[]]],[11,"sub_assign","","",19,[[]]],[11,"sub_assign","","",19,[[]]],[11,"sub_assign","","",21,[[]]],[11,"sub_assign","","",21,[[]]],[11,"sub_assign","","",22,[[]]],[11,"sub_assign","","",22,[[]]],[11,"sub_assign","","",24,[[]]],[11,"sub_assign","","",24,[[]]],[11,"sub_assign","","",25,[[]]],[11,"sub_assign","","",25,[[]]],[11,"sub_assign","","",27,[[]]],[11,"sub_assign","","",27,[[]]],[11,"sub_assign","","",28,[[]]],[11,"sub_assign","","",28,[[]]],[11,"sub_assign","","",30,[[]]],[11,"sub_assign","","",30,[[]]],[11,"sub_assign","","",31,[[]]],[11,"sub_assign","","",31,[[]]],[11,"sub_assign","","",32,[[]]],[11,"sub_assign","","",32,[[]]],[11,"sub_assign","","",34,[[]]],[11,"sub_assign","","",34,[[]]],[11,"sub_assign","","",35,[[]]],[11,"sub_assign","","",35,[[]]],[11,"sub_assign","","",37,[[]]],[11,"sub_assign","","",37,[[]]],[11,"sub_assign","","",38,[[]]],[11,"sub_assign","","",38,[[]]],[11,"sub_assign","","",40,[[]]],[11,"sub_assign","","",40,[[]]],[11,"sub_assign","","",41,[[]]],[11,"sub_assign","","",41,[[]]],[11,"sub_assign","","",42,[[]]],[11,"sub_assign","","",42,[[]]],[11,"sub_assign","","",44,[[]]],[11,"sub_assign","","",44,[[]]],[11,"sub_assign","","",45,[[]]],[11,"sub_assign","","",45,[[]]],[11,"sub_assign","","",46,[[]]],[11,"sub_assign","","",46,[[]]],[11,"sub_assign","","",51,[[]]],[11,"sub_assign","","",51,[[]]],[11,"sub_assign","","",52,[[]]],[11,"sub_assign","","",52,[[]]],[11,"sub_assign","","",54,[[]]],[11,"sub_assign","","",54,[[]]],[11,"sub_assign","","",55,[[]]],[11,"sub_assign","","",55,[[]]],[11,"sub_assign","","",57,[[]]],[11,"sub_assign","","",57,[[]]],[11,"sub_assign","","",58,[[]]],[11,"sub_assign","","",58,[[]]],[11,"sub_assign","","",60,[[]]],[11,"sub_assign","","",60,[[]]],[11,"sub_assign","","",61,[[]]],[11,"sub_assign","","",61,[[]]],[11,"sub_assign","","",62,[[]]],[11,"sub_assign","","",62,[[]]],[11,"sub_assign","","",64,[[]]],[11,"sub_assign","","",64,[[]]],[11,"sub_assign","","",65,[[]]],[11,"sub_assign","","",65,[[]]],[11,"sub_assign","","",66,[[]]],[11,"sub_assign","","",66,[[]]],[11,"sub_assign","","",68,[[]]],[11,"sub_assign","","",68,[[]]],[11,"sub_assign","","",69,[[]]],[11,"sub_assign","","",69,[[
]]],[11,"sub_assign","","",74,[[]]],[11,"sub_assign","","",74,[[]]],[11,"sub_assign","","",75,[[]]],[11,"sub_assign","","",75,[[]]],[11,"sub_assign","","",77,[[]]],[11,"sub_assign","","",77,[[]]],[11,"sub_assign","","",78,[[]]],[11,"sub_assign","","",78,[[]]],[11,"sub_assign","","",80,[[]]],[11,"sub_assign","","",80,[[]]],[11,"sub_assign","","",81,[[]]],[11,"sub_assign","","",81,[[]]],[11,"sub_assign","","",82,[[]]],[11,"sub_assign","","",82,[[]]],[11,"sub_assign","","",84,[[]]],[11,"sub_assign","","",84,[[]]],[11,"sub_assign","","",85,[[]]],[11,"sub_assign","","",85,[[]]],[11,"sub_assign","","",86,[[]]],[11,"sub_assign","","",86,[[]]],[11,"sub_assign","","",88,[[]]],[11,"sub_assign","","",88,[[]]],[11,"sub_assign","","",89,[[]]],[11,"sub_assign","","",89,[[]]],[11,"sub_assign","","",48,[[]]],[11,"sub_assign","","",48,[[]]],[11,"sub_assign","","",49,[[]]],[11,"sub_assign","","",49,[[]]],[11,"sub_assign","","",71,[[]]],[11,"sub_assign","","",71,[[]]],[11,"sub_assign","","",72,[[]]],[11,"sub_assign","","",72,[[]]],[11,"sub_assign","","",91,[[]]],[11,"sub_assign","","",91,[[]]],[11,"sub_assign","","",92,[[]]],[11,"sub_assign","","",92,[[]]],[11,"mul_assign","","",15,[[]]],[11,"mul_assign","","",15,[[]]],[11,"mul_assign","","",16,[[]]],[11,"mul_assign","","",16,[[]]],[11,"mul_assign","","",18,[[]]],[11,"mul_assign","","",18,[[]]],[11,"mul_assign","","",19,[[]]],[11,"mul_assign","","",19,[[]]],[11,"mul_assign","","",21,[[]]],[11,"mul_assign","","",21,[[]]],[11,"mul_assign","","",22,[[]]],[11,"mul_assign","","",22,[[]]],[11,"mul_assign","","",24,[[]]],[11,"mul_assign","","",24,[[]]],[11,"mul_assign","","",25,[[]]],[11,"mul_assign","","",25,[[]]],[11,"mul_assign","","",27,[[]]],[11,"mul_assign","","",27,[[]]],[11,"mul_assign","","",28,[[]]],[11,"mul_assign","","",28,[[]]],[11,"mul_assign","","",30,[[]]],[11,"mul_assign","","",30,[[]]],[11,"mul_assign","","",31,[[]]],[11,"mul_assign","","",31,[[]]],[11,"mul_assign","","",32,[[]]],[11,"mul_assign","","",32,[[]]],[11,"mul_assign","","",34,[[]]],[11,"mul_assign","","",34,[[]]],[11,"mul_assign","","",35,[[]]],[11,"mul_assign","","",35,[[]]],[11,"mul_assign","","",37,[[]]],[11,"mul_assign","","",37,[[]]],[11,"mul_assign","","",38,[[]]],[11,"mul_assign","","",38,[[]]],[11,"mul_assign","","",40,[[]]],[11,"mul_assign","","",40,[[]]],[11,"mul_assign","","",41,[[]]],[11,"mul_assign","","",41,[[]]],[11,"mul_assign","","",42,[[]]],[11,"mul_assign","","",42,[[]]],[11,"mul_assign","","",44,[[]]],[11,"mul_assign","","",44,[[]]],[11,"mul_assign","","",45,[[]]],[11,"mul_assign","","",45,[[]]],[11,"mul_assign","","",46,[[]]],[11,"mul_assign","","",46,[[]]],[11,"mul_assign","","",51,[[]]],[11,"mul_assign","","",51,[[]]],[11,"mul_assign","","",52,[[]]],[11,"mul_assign","","",52,[[]]],[11,"mul_assign","","",54,[[]]],[11,"mul_assign","","",54,[[]]],[11,"mul_assign","","",55,[[]]],[11,"mul_assign","","",55,[[]]],[11,"mul_assign","","",57,[[]]],[11,"mul_assign","","",57,[[]]],[11,"mul_assign","","",58,[[]]],[11,"mul_assign","","",58,[[]]],[11,"mul_assign","","",60,[[]]],[11,"mul_assign","","",60,[[]]],[11,"mul_assign","","",61,[[]]],[11,"mul_assign","","",61,[[]]],[11,"mul_assign","","",62,[[]]],[11,"mul_assign","","",62,[[]]],[11,"mul_assign","","",64,[[]]],[11,"mul_assign","","",64,[[]]],[11,"mul_assign","","",65,[[]]],[11,"mul_assign","","",65,[[]]],[11,"mul_assign","","",66,[[]]],[11,"mul_assign","","",66,[[]]],[11,"mul_assign","","",68,[[]]],[11,"mul_assign","","",68,[[]]],[11,"mul_assign","","",69,[[]]],[11,"mul_assign","","",69,[[]]],[11,"mul_assign","","",74,[[]]]
,[11,"mul_assign","","",74,[[]]],[11,"mul_assign","","",75,[[]]],[11,"mul_assign","","",75,[[]]],[11,"mul_assign","","",77,[[]]],[11,"mul_assign","","",77,[[]]],[11,"mul_assign","","",78,[[]]],[11,"mul_assign","","",78,[[]]],[11,"mul_assign","","",80,[[]]],[11,"mul_assign","","",80,[[]]],[11,"mul_assign","","",81,[[]]],[11,"mul_assign","","",81,[[]]],[11,"mul_assign","","",82,[[]]],[11,"mul_assign","","",82,[[]]],[11,"mul_assign","","",84,[[]]],[11,"mul_assign","","",84,[[]]],[11,"mul_assign","","",85,[[]]],[11,"mul_assign","","",85,[[]]],[11,"mul_assign","","",86,[[]]],[11,"mul_assign","","",86,[[]]],[11,"mul_assign","","",88,[[]]],[11,"mul_assign","","",88,[[]]],[11,"mul_assign","","",89,[[]]],[11,"mul_assign","","",89,[[]]],[11,"mul_assign","","",48,[[]]],[11,"mul_assign","","",48,[[]]],[11,"mul_assign","","",49,[[]]],[11,"mul_assign","","",49,[[]]],[11,"mul_assign","","",71,[[]]],[11,"mul_assign","","",71,[[]]],[11,"mul_assign","","",72,[[]]],[11,"mul_assign","","",72,[[]]],[11,"mul_assign","","",91,[[]]],[11,"mul_assign","","",91,[[]]],[11,"mul_assign","","",92,[[]]],[11,"mul_assign","","",92,[[]]],[11,"div_assign","","",15,[[]]],[11,"div_assign","","",15,[[]]],[11,"div_assign","","",16,[[]]],[11,"div_assign","","",16,[[]]],[11,"div_assign","","",18,[[]]],[11,"div_assign","","",18,[[]]],[11,"div_assign","","",19,[[]]],[11,"div_assign","","",19,[[]]],[11,"div_assign","","",21,[[]]],[11,"div_assign","","",21,[[]]],[11,"div_assign","","",22,[[]]],[11,"div_assign","","",22,[[]]],[11,"div_assign","","",24,[[]]],[11,"div_assign","","",24,[[]]],[11,"div_assign","","",25,[[]]],[11,"div_assign","","",25,[[]]],[11,"div_assign","","",27,[[]]],[11,"div_assign","","",27,[[]]],[11,"div_assign","","",28,[[]]],[11,"div_assign","","",28,[[]]],[11,"div_assign","","",30,[[]]],[11,"div_assign","","",30,[[]]],[11,"div_assign","","",31,[[]]],[11,"div_assign","","",31,[[]]],[11,"div_assign","","",32,[[]]],[11,"div_assign","","",32,[[]]],[11,"div_assign","","",34,[[]]],[11,"div_assign","","",34,[[]]],[11,"div_assign","","",35,[[]]],[11,"div_assign","","",35,[[]]],[11,"div_assign","","",37,[[]]],[11,"div_assign","","",37,[[]]],[11,"div_assign","","",38,[[]]],[11,"div_assign","","",38,[[]]],[11,"div_assign","","",40,[[]]],[11,"div_assign","","",40,[[]]],[11,"div_assign","","",41,[[]]],[11,"div_assign","","",41,[[]]],[11,"div_assign","","",42,[[]]],[11,"div_assign","","",42,[[]]],[11,"div_assign","","",44,[[]]],[11,"div_assign","","",44,[[]]],[11,"div_assign","","",45,[[]]],[11,"div_assign","","",45,[[]]],[11,"div_assign","","",46,[[]]],[11,"div_assign","","",46,[[]]],[11,"div_assign","","",51,[[]]],[11,"div_assign","","",51,[[]]],[11,"div_assign","","",52,[[]]],[11,"div_assign","","",52,[[]]],[11,"div_assign","","",54,[[]]],[11,"div_assign","","",54,[[]]],[11,"div_assign","","",55,[[]]],[11,"div_assign","","",55,[[]]],[11,"div_assign","","",57,[[]]],[11,"div_assign","","",57,[[]]],[11,"div_assign","","",58,[[]]],[11,"div_assign","","",58,[[]]],[11,"div_assign","","",60,[[]]],[11,"div_assign","","",60,[[]]],[11,"div_assign","","",61,[[]]],[11,"div_assign","","",61,[[]]],[11,"div_assign","","",62,[[]]],[11,"div_assign","","",62,[[]]],[11,"div_assign","","",64,[[]]],[11,"div_assign","","",64,[[]]],[11,"div_assign","","",65,[[]]],[11,"div_assign","","",65,[[]]],[11,"div_assign","","",66,[[]]],[11,"div_assign","","",66,[[]]],[11,"div_assign","","",68,[[]]],[11,"div_assign","","",68,[[]]],[11,"div_assign","","",69,[[]]],[11,"div_assign","","",69,[[]]],[11,"div_assign","","",74,[[]]],[11,"div_assign","","",74,[[]]],[1
1,"div_assign","","",75,[[]]],[11,"div_assign","","",75,[[]]],[11,"div_assign","","",77,[[]]],[11,"div_assign","","",77,[[]]],[11,"div_assign","","",78,[[]]],[11,"div_assign","","",78,[[]]],[11,"div_assign","","",80,[[]]],[11,"div_assign","","",80,[[]]],[11,"div_assign","","",81,[[]]],[11,"div_assign","","",81,[[]]],[11,"div_assign","","",82,[[]]],[11,"div_assign","","",82,[[]]],[11,"div_assign","","",84,[[]]],[11,"div_assign","","",84,[[]]],[11,"div_assign","","",85,[[]]],[11,"div_assign","","",85,[[]]],[11,"div_assign","","",86,[[]]],[11,"div_assign","","",86,[[]]],[11,"div_assign","","",88,[[]]],[11,"div_assign","","",88,[[]]],[11,"div_assign","","",89,[[]]],[11,"div_assign","","",89,[[]]],[11,"div_assign","","",48,[[]]],[11,"div_assign","","",48,[[]]],[11,"div_assign","","",49,[[]]],[11,"div_assign","","",49,[[]]],[11,"div_assign","","",71,[[]]],[11,"div_assign","","",71,[[]]],[11,"div_assign","","",72,[[]]],[11,"div_assign","","",72,[[]]],[11,"div_assign","","",91,[[]]],[11,"div_assign","","",91,[[]]],[11,"div_assign","","",92,[[]]],[11,"div_assign","","",92,[[]]],[11,"rem_assign","","",15,[[]]],[11,"rem_assign","","",15,[[]]],[11,"rem_assign","","",16,[[]]],[11,"rem_assign","","",16,[[]]],[11,"rem_assign","","",18,[[]]],[11,"rem_assign","","",18,[[]]],[11,"rem_assign","","",19,[[]]],[11,"rem_assign","","",19,[[]]],[11,"rem_assign","","",21,[[]]],[11,"rem_assign","","",21,[[]]],[11,"rem_assign","","",22,[[]]],[11,"rem_assign","","",22,[[]]],[11,"rem_assign","","",24,[[]]],[11,"rem_assign","","",24,[[]]],[11,"rem_assign","","",25,[[]]],[11,"rem_assign","","",25,[[]]],[11,"rem_assign","","",27,[[]]],[11,"rem_assign","","",27,[[]]],[11,"rem_assign","","",28,[[]]],[11,"rem_assign","","",28,[[]]],[11,"rem_assign","","",30,[[]]],[11,"rem_assign","","",30,[[]]],[11,"rem_assign","","",31,[[]]],[11,"rem_assign","","",31,[[]]],[11,"rem_assign","","",32,[[]]],[11,"rem_assign","","",32,[[]]],[11,"rem_assign","","",34,[[]]],[11,"rem_assign","","",34,[[]]],[11,"rem_assign","","",35,[[]]],[11,"rem_assign","","",35,[[]]],[11,"rem_assign","","",37,[[]]],[11,"rem_assign","","",37,[[]]],[11,"rem_assign","","",38,[[]]],[11,"rem_assign","","",38,[[]]],[11,"rem_assign","","",40,[[]]],[11,"rem_assign","","",40,[[]]],[11,"rem_assign","","",41,[[]]],[11,"rem_assign","","",41,[[]]],[11,"rem_assign","","",42,[[]]],[11,"rem_assign","","",42,[[]]],[11,"rem_assign","","",44,[[]]],[11,"rem_assign","","",44,[[]]],[11,"rem_assign","","",45,[[]]],[11,"rem_assign","","",45,[[]]],[11,"rem_assign","","",46,[[]]],[11,"rem_assign","","",46,[[]]],[11,"rem_assign","","",51,[[]]],[11,"rem_assign","","",51,[[]]],[11,"rem_assign","","",52,[[]]],[11,"rem_assign","","",52,[[]]],[11,"rem_assign","","",54,[[]]],[11,"rem_assign","","",54,[[]]],[11,"rem_assign","","",55,[[]]],[11,"rem_assign","","",55,[[]]],[11,"rem_assign","","",57,[[]]],[11,"rem_assign","","",57,[[]]],[11,"rem_assign","","",58,[[]]],[11,"rem_assign","","",58,[[]]],[11,"rem_assign","","",60,[[]]],[11,"rem_assign","","",60,[[]]],[11,"rem_assign","","",61,[[]]],[11,"rem_assign","","",61,[[]]],[11,"rem_assign","","",62,[[]]],[11,"rem_assign","","",62,[[]]],[11,"rem_assign","","",64,[[]]],[11,"rem_assign","","",64,[[]]],[11,"rem_assign","","",65,[[]]],[11,"rem_assign","","",65,[[]]],[11,"rem_assign","","",66,[[]]],[11,"rem_assign","","",66,[[]]],[11,"rem_assign","","",68,[[]]],[11,"rem_assign","","",68,[[]]],[11,"rem_assign","","",69,[[]]],[11,"rem_assign","","",69,[[]]],[11,"rem_assign","","",74,[[]]],[11,"rem_assign","","",74,[[]]],[11,"rem_assign","","",75,[[]]],[11,"
rem_assign","","",75,[[]]],[11,"rem_assign","","",77,[[]]],[11,"rem_assign","","",77,[[]]],[11,"rem_assign","","",78,[[]]],[11,"rem_assign","","",78,[[]]],[11,"rem_assign","","",80,[[]]],[11,"rem_assign","","",80,[[]]],[11,"rem_assign","","",81,[[]]],[11,"rem_assign","","",81,[[]]],[11,"rem_assign","","",82,[[]]],[11,"rem_assign","","",82,[[]]],[11,"rem_assign","","",84,[[]]],[11,"rem_assign","","",84,[[]]],[11,"rem_assign","","",85,[[]]],[11,"rem_assign","","",85,[[]]],[11,"rem_assign","","",86,[[]]],[11,"rem_assign","","",86,[[]]],[11,"rem_assign","","",88,[[]]],[11,"rem_assign","","",88,[[]]],[11,"rem_assign","","",89,[[]]],[11,"rem_assign","","",89,[[]]],[11,"rem_assign","","",48,[[]]],[11,"rem_assign","","",48,[[]]],[11,"rem_assign","","",49,[[]]],[11,"rem_assign","","",49,[[]]],[11,"rem_assign","","",71,[[]]],[11,"rem_assign","","",71,[[]]],[11,"rem_assign","","",72,[[]]],[11,"rem_assign","","",72,[[]]],[11,"rem_assign","","",91,[[]]],[11,"rem_assign","","",91,[[]]],[11,"rem_assign","","",92,[[]]],[11,"rem_assign","","",92,[[]]],[11,"not","","",15,[[]]],[11,"not","","",16,[[]]],[11,"not","","",17,[[]]],[11,"not","","",18,[[]]],[11,"not","","",19,[[]]],[11,"not","","",20,[[]]],[11,"not","","",21,[[]]],[11,"not","","",22,[[]]],[11,"not","","",23,[[]]],[11,"not","","",24,[[]]],[11,"not","","",25,[[]]],[11,"not","","",26,[[]]],[11,"not","","",27,[[]]],[11,"not","","",28,[[]]],[11,"not","","",29,[[]]],[11,"not","","",30,[[]]],[11,"not","","",31,[[]]],[11,"not","","",33,[[]]],[11,"not","","",34,[[]]],[11,"not","","",35,[[]]],[11,"not","","",36,[[]]],[11,"not","","",37,[[]]],[11,"not","","",38,[[]]],[11,"not","","",39,[[]]],[11,"not","","",40,[[]]],[11,"not","","",41,[[]]],[11,"not","","",43,[[]]],[11,"not","","",44,[[]]],[11,"not","","",45,[[]]],[11,"not","","",47,[[]]],[11,"not","","",51,[[]]],[11,"not","","",52,[[]]],[11,"not","","",53,[[]]],[11,"not","","",54,[[]]],[11,"not","","",55,[[]]],[11,"not","","",56,[[]]],[11,"not","","",57,[[]]],[11,"not","","",58,[[]]],[11,"not","","",59,[[]]],[11,"not","","",60,[[]]],[11,"not","","",61,[[]]],[11,"not","","",63,[[]]],[11,"not","","",64,[[]]],[11,"not","","",65,[[]]],[11,"not","","",67,[[]]],[11,"not","","",68,[[]]],[11,"not","","",69,[[]]],[11,"not","","",70,[[]]],[11,"not","","",74,[[]]],[11,"not","","",75,[[]]],[11,"not","","",76,[[]]],[11,"not","","",77,[[]]],[11,"not","","",78,[[]]],[11,"not","","",79,[[]]],[11,"not","","",80,[[]]],[11,"not","","",81,[[]]],[11,"not","","",83,[[]]],[11,"not","","",84,[[]]],[11,"not","","",85,[[]]],[11,"not","","",87,[[]]],[11,"not","","",88,[[]]],[11,"not","","",89,[[]]],[11,"not","","",90,[[]]],[11,"not","","",48,[[]]],[11,"not","","",49,[[]]],[11,"not","","",50,[[]]],[11,"not","","",71,[[]]],[11,"not","","",72,[[]]],[11,"not","","",73,[[]]],[11,"not","","",91,[[]]],[11,"not","","",92,[[]]],[11,"not","","",93,[[]]],[11,"bitand","","",15,[[]]],[11,"bitand","","",15,[[]]],[11,"bitand","","",16,[[]]],[11,"bitand","","",16,[[]]],[11,"bitand","","",17,[[]]],[11,"bitand","","",17,[[]]],[11,"bitand","","",18,[[]]],[11,"bitand","","",18,[[]]],[11,"bitand","","",19,[[]]],[11,"bitand","","",19,[[]]],[11,"bitand","","",20,[[]]],[11,"bitand","","",20,[[]]],[11,"bitand","","",21,[[]]],[11,"bitand","","",21,[[]]],[11,"bitand","","",22,[[]]],[11,"bitand","","",22,[[]]],[11,"bitand","","",23,[[]]],[11,"bitand","","",23,[[]]],[11,"bitand","","",24,[[]]],[11,"bitand","","",24,[[]]],[11,"bitand","","",25,[[]]],[11,"bitand","","",25,[[]]],[11,"bitand","","",26,[[]]],[11,"bitand","","",26,[[]]],[11,"bitand","","",27,[[]]],[11,
"bitand","","",27,[[]]],[11,"bitand","","",28,[[]]],[11,"bitand","","",28,[[]]],[11,"bitand","","",29,[[]]],[11,"bitand","","",29,[[]]],[11,"bitand","","",30,[[]]],[11,"bitand","","",30,[[]]],[11,"bitand","","",31,[[]]],[11,"bitand","","",31,[[]]],[11,"bitand","","",33,[[]]],[11,"bitand","","",33,[[]]],[11,"bitand","","",34,[[]]],[11,"bitand","","",34,[[]]],[11,"bitand","","",35,[[]]],[11,"bitand","","",35,[[]]],[11,"bitand","","",36,[[]]],[11,"bitand","","",36,[[]]],[11,"bitand","","",37,[[]]],[11,"bitand","","",37,[[]]],[11,"bitand","","",38,[[]]],[11,"bitand","","",38,[[]]],[11,"bitand","","",39,[[]]],[11,"bitand","","",39,[[]]],[11,"bitand","","",40,[[]]],[11,"bitand","","",40,[[]]],[11,"bitand","","",41,[[]]],[11,"bitand","","",41,[[]]],[11,"bitand","","",43,[[]]],[11,"bitand","","",43,[[]]],[11,"bitand","","",44,[[]]],[11,"bitand","","",44,[[]]],[11,"bitand","","",45,[[]]],[11,"bitand","","",45,[[]]],[11,"bitand","","",47,[[]]],[11,"bitand","","",47,[[]]],[11,"bitand","","",51,[[]]],[11,"bitand","","",51,[[]]],[11,"bitand","","",52,[[]]],[11,"bitand","","",52,[[]]],[11,"bitand","","",53,[[]]],[11,"bitand","","",53,[[]]],[11,"bitand","","",54,[[]]],[11,"bitand","","",54,[[]]],[11,"bitand","","",55,[[]]],[11,"bitand","","",55,[[]]],[11,"bitand","","",56,[[]]],[11,"bitand","","",56,[[]]],[11,"bitand","","",57,[[]]],[11,"bitand","","",57,[[]]],[11,"bitand","","",58,[[]]],[11,"bitand","","",58,[[]]],[11,"bitand","","",59,[[]]],[11,"bitand","","",59,[[]]],[11,"bitand","","",60,[[]]],[11,"bitand","","",60,[[]]],[11,"bitand","","",61,[[]]],[11,"bitand","","",61,[[]]],[11,"bitand","","",63,[[]]],[11,"bitand","","",63,[[]]],[11,"bitand","","",64,[[]]],[11,"bitand","","",64,[[]]],[11,"bitand","","",65,[[]]],[11,"bitand","","",65,[[]]],[11,"bitand","","",67,[[]]],[11,"bitand","","",67,[[]]],[11,"bitand","","",68,[[]]],[11,"bitand","","",68,[[]]],[11,"bitand","","",69,[[]]],[11,"bitand","","",69,[[]]],[11,"bitand","","",70,[[]]],[11,"bitand","","",70,[[]]],[11,"bitand","","",74,[[]]],[11,"bitand","","",74,[[]]],[11,"bitand","","",75,[[]]],[11,"bitand","","",75,[[]]],[11,"bitand","","",76,[[]]],[11,"bitand","","",76,[[]]],[11,"bitand","","",77,[[]]],[11,"bitand","","",77,[[]]],[11,"bitand","","",78,[[]]],[11,"bitand","","",78,[[]]],[11,"bitand","","",79,[[]]],[11,"bitand","","",79,[[]]],[11,"bitand","","",80,[[]]],[11,"bitand","","",80,[[]]],[11,"bitand","","",81,[[]]],[11,"bitand","","",81,[[]]],[11,"bitand","","",83,[[]]],[11,"bitand","","",83,[[]]],[11,"bitand","","",84,[[]]],[11,"bitand","","",84,[[]]],[11,"bitand","","",85,[[]]],[11,"bitand","","",85,[[]]],[11,"bitand","","",87,[[]]],[11,"bitand","","",87,[[]]],[11,"bitand","","",88,[[]]],[11,"bitand","","",88,[[]]],[11,"bitand","","",89,[[]]],[11,"bitand","","",89,[[]]],[11,"bitand","","",90,[[]]],[11,"bitand","","",90,[[]]],[11,"bitand","","",48,[[]]],[11,"bitand","","",48,[[]]],[11,"bitand","","",49,[[]]],[11,"bitand","","",49,[[]]],[11,"bitand","","",50,[[]]],[11,"bitand","","",50,[[]]],[11,"bitand","","",71,[[]]],[11,"bitand","","",71,[[]]],[11,"bitand","","",72,[[]]],[11,"bitand","","",72,[[]]],[11,"bitand","","",73,[[]]],[11,"bitand","","",73,[[]]],[11,"bitand","","",91,[[]]],[11,"bitand","","",91,[[]]],[11,"bitand","","",92,[[]]],[11,"bitand","","",92,[[]]],[11,"bitand","","",93,[[]]],[11,"bitand","","",93,[[]]],[11,"bitor","","",15,[[]]],[11,"bitor","","",15,[[]]],[11,"bitor","","",16,[[]]],[11,"bitor","","",16,[[]]],[11,"bitor","","",17,[[]]],[11,"bitor","","",17,[[]]],[11,"bitor","","",18,[[]]],[11,"bitor","","",18,[[]]],[11,"bitor"
,"","",19,[[]]],[11,"bitor","","",19,[[]]],[11,"bitor","","",20,[[]]],[11,"bitor","","",20,[[]]],[11,"bitor","","",21,[[]]],[11,"bitor","","",21,[[]]],[11,"bitor","","",22,[[]]],[11,"bitor","","",22,[[]]],[11,"bitor","","",23,[[]]],[11,"bitor","","",23,[[]]],[11,"bitor","","",24,[[]]],[11,"bitor","","",24,[[]]],[11,"bitor","","",25,[[]]],[11,"bitor","","",25,[[]]],[11,"bitor","","",26,[[]]],[11,"bitor","","",26,[[]]],[11,"bitor","","",27,[[]]],[11,"bitor","","",27,[[]]],[11,"bitor","","",28,[[]]],[11,"bitor","","",28,[[]]],[11,"bitor","","",29,[[]]],[11,"bitor","","",29,[[]]],[11,"bitor","","",30,[[]]],[11,"bitor","","",30,[[]]],[11,"bitor","","",31,[[]]],[11,"bitor","","",31,[[]]],[11,"bitor","","",33,[[]]],[11,"bitor","","",33,[[]]],[11,"bitor","","",34,[[]]],[11,"bitor","","",34,[[]]],[11,"bitor","","",35,[[]]],[11,"bitor","","",35,[[]]],[11,"bitor","","",36,[[]]],[11,"bitor","","",36,[[]]],[11,"bitor","","",37,[[]]],[11,"bitor","","",37,[[]]],[11,"bitor","","",38,[[]]],[11,"bitor","","",38,[[]]],[11,"bitor","","",39,[[]]],[11,"bitor","","",39,[[]]],[11,"bitor","","",40,[[]]],[11,"bitor","","",40,[[]]],[11,"bitor","","",41,[[]]],[11,"bitor","","",41,[[]]],[11,"bitor","","",43,[[]]],[11,"bitor","","",43,[[]]],[11,"bitor","","",44,[[]]],[11,"bitor","","",44,[[]]],[11,"bitor","","",45,[[]]],[11,"bitor","","",45,[[]]],[11,"bitor","","",47,[[]]],[11,"bitor","","",47,[[]]],[11,"bitor","","",51,[[]]],[11,"bitor","","",51,[[]]],[11,"bitor","","",52,[[]]],[11,"bitor","","",52,[[]]],[11,"bitor","","",53,[[]]],[11,"bitor","","",53,[[]]],[11,"bitor","","",54,[[]]],[11,"bitor","","",54,[[]]],[11,"bitor","","",55,[[]]],[11,"bitor","","",55,[[]]],[11,"bitor","","",56,[[]]],[11,"bitor","","",56,[[]]],[11,"bitor","","",57,[[]]],[11,"bitor","","",57,[[]]],[11,"bitor","","",58,[[]]],[11,"bitor","","",58,[[]]],[11,"bitor","","",59,[[]]],[11,"bitor","","",59,[[]]],[11,"bitor","","",60,[[]]],[11,"bitor","","",60,[[]]],[11,"bitor","","",61,[[]]],[11,"bitor","","",61,[[]]],[11,"bitor","","",63,[[]]],[11,"bitor","","",63,[[]]],[11,"bitor","","",64,[[]]],[11,"bitor","","",64,[[]]],[11,"bitor","","",65,[[]]],[11,"bitor","","",65,[[]]],[11,"bitor","","",67,[[]]],[11,"bitor","","",67,[[]]],[11,"bitor","","",68,[[]]],[11,"bitor","","",68,[[]]],[11,"bitor","","",69,[[]]],[11,"bitor","","",69,[[]]],[11,"bitor","","",70,[[]]],[11,"bitor","","",70,[[]]],[11,"bitor","","",74,[[]]],[11,"bitor","","",74,[[]]],[11,"bitor","","",75,[[]]],[11,"bitor","","",75,[[]]],[11,"bitor","","",76,[[]]],[11,"bitor","","",76,[[]]],[11,"bitor","","",77,[[]]],[11,"bitor","","",77,[[]]],[11,"bitor","","",78,[[]]],[11,"bitor","","",78,[[]]],[11,"bitor","","",79,[[]]],[11,"bitor","","",79,[[]]],[11,"bitor","","",80,[[]]],[11,"bitor","","",80,[[]]],[11,"bitor","","",81,[[]]],[11,"bitor","","",81,[[]]],[11,"bitor","","",83,[[]]],[11,"bitor","","",83,[[]]],[11,"bitor","","",84,[[]]],[11,"bitor","","",84,[[]]],[11,"bitor","","",85,[[]]],[11,"bitor","","",85,[[]]],[11,"bitor","","",87,[[]]],[11,"bitor","","",87,[[]]],[11,"bitor","","",88,[[]]],[11,"bitor","","",88,[[]]],[11,"bitor","","",89,[[]]],[11,"bitor","","",89,[[]]],[11,"bitor","","",90,[[]]],[11,"bitor","","",90,[[]]],[11,"bitor","","",48,[[]]],[11,"bitor","","",48,[[]]],[11,"bitor","","",49,[[]]],[11,"bitor","","",49,[[]]],[11,"bitor","","",50,[[]]],[11,"bitor","","",50,[[]]],[11,"bitor","","",71,[[]]],[11,"bitor","","",71,[[]]],[11,"bitor","","",72,[[]]],[11,"bitor","","",72,[[]]],[11,"bitor","","",73,[[]]],[11,"bitor","","",73,[[]]],[11,"bitor","","",91,[[]]],[11,"bitor","","",91,[[]]],[1
1,"bitor","","",92,[[]]],[11,"bitor","","",92,[[]]],[11,"bitor","","",93,[[]]],[11,"bitor","","",93,[[]]],[11,"bitxor","","",15,[[]]],[11,"bitxor","","",15,[[]]],[11,"bitxor","","",16,[[]]],[11,"bitxor","","",16,[[]]],[11,"bitxor","","",17,[[]]],[11,"bitxor","","",17,[[]]],[11,"bitxor","","",18,[[]]],[11,"bitxor","","",18,[[]]],[11,"bitxor","","",19,[[]]],[11,"bitxor","","",19,[[]]],[11,"bitxor","","",20,[[]]],[11,"bitxor","","",20,[[]]],[11,"bitxor","","",21,[[]]],[11,"bitxor","","",21,[[]]],[11,"bitxor","","",22,[[]]],[11,"bitxor","","",22,[[]]],[11,"bitxor","","",23,[[]]],[11,"bitxor","","",23,[[]]],[11,"bitxor","","",24,[[]]],[11,"bitxor","","",24,[[]]],[11,"bitxor","","",25,[[]]],[11,"bitxor","","",25,[[]]],[11,"bitxor","","",26,[[]]],[11,"bitxor","","",26,[[]]],[11,"bitxor","","",27,[[]]],[11,"bitxor","","",27,[[]]],[11,"bitxor","","",28,[[]]],[11,"bitxor","","",28,[[]]],[11,"bitxor","","",29,[[]]],[11,"bitxor","","",29,[[]]],[11,"bitxor","","",30,[[]]],[11,"bitxor","","",30,[[]]],[11,"bitxor","","",31,[[]]],[11,"bitxor","","",31,[[]]],[11,"bitxor","","",33,[[]]],[11,"bitxor","","",33,[[]]],[11,"bitxor","","",34,[[]]],[11,"bitxor","","",34,[[]]],[11,"bitxor","","",35,[[]]],[11,"bitxor","","",35,[[]]],[11,"bitxor","","",36,[[]]],[11,"bitxor","","",36,[[]]],[11,"bitxor","","",37,[[]]],[11,"bitxor","","",37,[[]]],[11,"bitxor","","",38,[[]]],[11,"bitxor","","",38,[[]]],[11,"bitxor","","",39,[[]]],[11,"bitxor","","",39,[[]]],[11,"bitxor","","",40,[[]]],[11,"bitxor","","",40,[[]]],[11,"bitxor","","",41,[[]]],[11,"bitxor","","",41,[[]]],[11,"bitxor","","",43,[[]]],[11,"bitxor","","",43,[[]]],[11,"bitxor","","",44,[[]]],[11,"bitxor","","",44,[[]]],[11,"bitxor","","",45,[[]]],[11,"bitxor","","",45,[[]]],[11,"bitxor","","",47,[[]]],[11,"bitxor","","",47,[[]]],[11,"bitxor","","",51,[[]]],[11,"bitxor","","",51,[[]]],[11,"bitxor","","",52,[[]]],[11,"bitxor","","",52,[[]]],[11,"bitxor","","",53,[[]]],[11,"bitxor","","",53,[[]]],[11,"bitxor","","",54,[[]]],[11,"bitxor","","",54,[[]]],[11,"bitxor","","",55,[[]]],[11,"bitxor","","",55,[[]]],[11,"bitxor","","",56,[[]]],[11,"bitxor","","",56,[[]]],[11,"bitxor","","",57,[[]]],[11,"bitxor","","",57,[[]]],[11,"bitxor","","",58,[[]]],[11,"bitxor","","",58,[[]]],[11,"bitxor","","",59,[[]]],[11,"bitxor","","",59,[[]]],[11,"bitxor","","",60,[[]]],[11,"bitxor","","",60,[[]]],[11,"bitxor","","",61,[[]]],[11,"bitxor","","",61,[[]]],[11,"bitxor","","",63,[[]]],[11,"bitxor","","",63,[[]]],[11,"bitxor","","",64,[[]]],[11,"bitxor","","",64,[[]]],[11,"bitxor","","",65,[[]]],[11,"bitxor","","",65,[[]]],[11,"bitxor","","",67,[[]]],[11,"bitxor","","",67,[[]]],[11,"bitxor","","",68,[[]]],[11,"bitxor","","",68,[[]]],[11,"bitxor","","",69,[[]]],[11,"bitxor","","",69,[[]]],[11,"bitxor","","",70,[[]]],[11,"bitxor","","",70,[[]]],[11,"bitxor","","",74,[[]]],[11,"bitxor","","",74,[[]]],[11,"bitxor","","",75,[[]]],[11,"bitxor","","",75,[[]]],[11,"bitxor","","",76,[[]]],[11,"bitxor","","",76,[[]]],[11,"bitxor","","",77,[[]]],[11,"bitxor","","",77,[[]]],[11,"bitxor","","",78,[[]]],[11,"bitxor","","",78,[[]]],[11,"bitxor","","",79,[[]]],[11,"bitxor","","",79,[[]]],[11,"bitxor","","",80,[[]]],[11,"bitxor","","",80,[[]]],[11,"bitxor","","",81,[[]]],[11,"bitxor","","",81,[[]]],[11,"bitxor","","",83,[[]]],[11,"bitxor","","",83,[[]]],[11,"bitxor","","",84,[[]]],[11,"bitxor","","",84,[[]]],[11,"bitxor","","",85,[[]]],[11,"bitxor","","",85,[[]]],[11,"bitxor","","",87,[[]]],[11,"bitxor","","",87,[[]]],[11,"bitxor","","",88,[[]]],[11,"bitxor","","",88,[[]]],[11,"bitxor","","",89,[[]]],[11,"
bitxor","","",89,[[]]],[11,"bitxor","","",90,[[]]],[11,"bitxor","","",90,[[]]],[11,"bitxor","","",48,[[]]],[11,"bitxor","","",48,[[]]],[11,"bitxor","","",49,[[]]],[11,"bitxor","","",49,[[]]],[11,"bitxor","","",50,[[]]],[11,"bitxor","","",50,[[]]],[11,"bitxor","","",71,[[]]],[11,"bitxor","","",71,[[]]],[11,"bitxor","","",72,[[]]],[11,"bitxor","","",72,[[]]],[11,"bitxor","","",73,[[]]],[11,"bitxor","","",73,[[]]],[11,"bitxor","","",91,[[]]],[11,"bitxor","","",91,[[]]],[11,"bitxor","","",92,[[]]],[11,"bitxor","","",92,[[]]],[11,"bitxor","","",93,[[]]],[11,"bitxor","","",93,[[]]],[11,"shl","","",15,[[]]],[11,"shl","","",15,[[]]],[11,"shl","","",16,[[]]],[11,"shl","","",16,[[]]],[11,"shl","","",18,[[]]],[11,"shl","","",18,[[]]],[11,"shl","","",19,[[]]],[11,"shl","","",19,[[]]],[11,"shl","","",21,[[]]],[11,"shl","","",21,[[]]],[11,"shl","","",22,[[]]],[11,"shl","","",22,[[]]],[11,"shl","","",24,[[]]],[11,"shl","","",24,[[]]],[11,"shl","","",25,[[]]],[11,"shl","","",25,[[]]],[11,"shl","","",27,[[]]],[11,"shl","","",27,[[]]],[11,"shl","","",28,[[]]],[11,"shl","","",28,[[]]],[11,"shl","","",30,[[]]],[11,"shl","","",30,[[]]],[11,"shl","","",31,[[]]],[11,"shl","","",31,[[]]],[11,"shl","","",34,[[]]],[11,"shl","","",34,[[]]],[11,"shl","","",35,[[]]],[11,"shl","","",35,[[]]],[11,"shl","","",37,[[]]],[11,"shl","","",37,[[]]],[11,"shl","","",38,[[]]],[11,"shl","","",38,[[]]],[11,"shl","","",40,[[]]],[11,"shl","","",40,[[]]],[11,"shl","","",41,[[]]],[11,"shl","","",41,[[]]],[11,"shl","","",44,[[]]],[11,"shl","","",44,[[]]],[11,"shl","","",45,[[]]],[11,"shl","","",45,[[]]],[11,"shl","","",51,[[]]],[11,"shl","","",51,[[]]],[11,"shl","","",52,[[]]],[11,"shl","","",52,[[]]],[11,"shl","","",54,[[]]],[11,"shl","","",54,[[]]],[11,"shl","","",55,[[]]],[11,"shl","","",55,[[]]],[11,"shl","","",57,[[]]],[11,"shl","","",57,[[]]],[11,"shl","","",58,[[]]],[11,"shl","","",58,[[]]],[11,"shl","","",60,[[]]],[11,"shl","","",60,[[]]],[11,"shl","","",61,[[]]],[11,"shl","","",61,[[]]],[11,"shl","","",64,[[]]],[11,"shl","","",64,[[]]],[11,"shl","","",65,[[]]],[11,"shl","","",65,[[]]],[11,"shl","","",68,[[]]],[11,"shl","","",68,[[]]],[11,"shl","","",69,[[]]],[11,"shl","","",69,[[]]],[11,"shl","","",74,[[]]],[11,"shl","","",74,[[]]],[11,"shl","","",75,[[]]],[11,"shl","","",75,[[]]],[11,"shl","","",77,[[]]],[11,"shl","","",77,[[]]],[11,"shl","","",78,[[]]],[11,"shl","","",78,[[]]],[11,"shl","","",80,[[]]],[11,"shl","","",80,[[]]],[11,"shl","","",81,[[]]],[11,"shl","","",81,[[]]],[11,"shl","","",84,[[]]],[11,"shl","","",84,[[]]],[11,"shl","","",85,[[]]],[11,"shl","","",85,[[]]],[11,"shl","","",88,[[]]],[11,"shl","","",88,[[]]],[11,"shl","","",89,[[]]],[11,"shl","","",89,[[]]],[11,"shl","","",48,[[]]],[11,"shl","","",48,[[]]],[11,"shl","","",49,[[]]],[11,"shl","","",49,[[]]],[11,"shl","","",71,[[]]],[11,"shl","","",71,[[]]],[11,"shl","","",72,[[]]],[11,"shl","","",72,[[]]],[11,"shl","","",91,[[]]],[11,"shl","","",91,[[]]],[11,"shl","","",92,[[]]],[11,"shl","","",92,[[]]],[11,"shr","","",15,[[]]],[11,"shr","","",15,[[]]],[11,"shr","","",16,[[]]],[11,"shr","","",16,[[]]],[11,"shr","","",18,[[]]],[11,"shr","","",18,[[]]],[11,"shr","","",19,[[]]],[11,"shr","","",19,[[]]],[11,"shr","","",21,[[]]],[11,"shr","","",21,[[]]],[11,"shr","","",22,[[]]],[11,"shr","","",22,[[]]],[11,"shr","","",24,[[]]],[11,"shr","","",24,[[]]],[11,"shr","","",25,[[]]],[11,"shr","","",25,[[]]],[11,"shr","","",27,[[]]],[11,"shr","","",27,[[]]],[11,"shr","","",28,[[]]],[11,"shr","","",28,[[]]],[11,"shr","","",30,[[]]],[11,"shr","","",30,[[]]],[11,"shr","","",31,[[]
]],[11,"shr","","",31,[[]]],[11,"shr","","",34,[[]]],[11,"shr","","",34,[[]]],[11,"shr","","",35,[[]]],[11,"shr","","",35,[[]]],[11,"shr","","",37,[[]]],[11,"shr","","",37,[[]]],[11,"shr","","",38,[[]]],[11,"shr","","",38,[[]]],[11,"shr","","",40,[[]]],[11,"shr","","",40,[[]]],[11,"shr","","",41,[[]]],[11,"shr","","",41,[[]]],[11,"shr","","",44,[[]]],[11,"shr","","",44,[[]]],[11,"shr","","",45,[[]]],[11,"shr","","",45,[[]]],[11,"shr","","",51,[[]]],[11,"shr","","",51,[[]]],[11,"shr","","",52,[[]]],[11,"shr","","",52,[[]]],[11,"shr","","",54,[[]]],[11,"shr","","",54,[[]]],[11,"shr","","",55,[[]]],[11,"shr","","",55,[[]]],[11,"shr","","",57,[[]]],[11,"shr","","",57,[[]]],[11,"shr","","",58,[[]]],[11,"shr","","",58,[[]]],[11,"shr","","",60,[[]]],[11,"shr","","",60,[[]]],[11,"shr","","",61,[[]]],[11,"shr","","",61,[[]]],[11,"shr","","",64,[[]]],[11,"shr","","",64,[[]]],[11,"shr","","",65,[[]]],[11,"shr","","",65,[[]]],[11,"shr","","",68,[[]]],[11,"shr","","",68,[[]]],[11,"shr","","",69,[[]]],[11,"shr","","",69,[[]]],[11,"shr","","",74,[[]]],[11,"shr","","",74,[[]]],[11,"shr","","",75,[[]]],[11,"shr","","",75,[[]]],[11,"shr","","",77,[[]]],[11,"shr","","",77,[[]]],[11,"shr","","",78,[[]]],[11,"shr","","",78,[[]]],[11,"shr","","",80,[[]]],[11,"shr","","",80,[[]]],[11,"shr","","",81,[[]]],[11,"shr","","",81,[[]]],[11,"shr","","",84,[[]]],[11,"shr","","",84,[[]]],[11,"shr","","",85,[[]]],[11,"shr","","",85,[[]]],[11,"shr","","",88,[[]]],[11,"shr","","",88,[[]]],[11,"shr","","",89,[[]]],[11,"shr","","",89,[[]]],[11,"shr","","",48,[[]]],[11,"shr","","",48,[[]]],[11,"shr","","",49,[[]]],[11,"shr","","",49,[[]]],[11,"shr","","",71,[[]]],[11,"shr","","",71,[[]]],[11,"shr","","",72,[[]]],[11,"shr","","",72,[[]]],[11,"shr","","",91,[[]]],[11,"shr","","",91,[[]]],[11,"shr","","",92,[[]]],[11,"shr","","",92,[[]]],[11,"bitand_assign","","",15,[[]]],[11,"bitand_assign","","",15,[[]]],[11,"bitand_assign","","",16,[[]]],[11,"bitand_assign","","",16,[[]]],[11,"bitand_assign","","",17,[[]]],[11,"bitand_assign","","",17,[[]]],[11,"bitand_assign","","",18,[[]]],[11,"bitand_assign","","",18,[[]]],[11,"bitand_assign","","",19,[[]]],[11,"bitand_assign","","",19,[[]]],[11,"bitand_assign","","",20,[[]]],[11,"bitand_assign","","",20,[[]]],[11,"bitand_assign","","",21,[[]]],[11,"bitand_assign","","",21,[[]]],[11,"bitand_assign","","",22,[[]]],[11,"bitand_assign","","",22,[[]]],[11,"bitand_assign","","",23,[[]]],[11,"bitand_assign","","",23,[[]]],[11,"bitand_assign","","",24,[[]]],[11,"bitand_assign","","",24,[[]]],[11,"bitand_assign","","",25,[[]]],[11,"bitand_assign","","",25,[[]]],[11,"bitand_assign","","",26,[[]]],[11,"bitand_assign","","",26,[[]]],[11,"bitand_assign","","",27,[[]]],[11,"bitand_assign","","",27,[[]]],[11,"bitand_assign","","",28,[[]]],[11,"bitand_assign","","",28,[[]]],[11,"bitand_assign","","",29,[[]]],[11,"bitand_assign","","",29,[[]]],[11,"bitand_assign","","",30,[[]]],[11,"bitand_assign","","",30,[[]]],[11,"bitand_assign","","",31,[[]]],[11,"bitand_assign","","",31,[[]]],[11,"bitand_assign","","",33,[[]]],[11,"bitand_assign","","",33,[[]]],[11,"bitand_assign","","",34,[[]]],[11,"bitand_assign","","",34,[[]]],[11,"bitand_assign","","",35,[[]]],[11,"bitand_assign","","",35,[[]]],[11,"bitand_assign","","",36,[[]]],[11,"bitand_assign","","",36,[[]]],[11,"bitand_assign","","",37,[[]]],[11,"bitand_assign","","",37,[[]]],[11,"bitand_assign","","",38,[[]]],[11,"bitand_assign","","",38,[[]]],[11,"bitand_assign","","",39,[[]]],[11,"bitand_assign","","",39,[[]]],[11,"bitand_assign","","",40,[[]]],[11,"bitand_
assign","","",40,[[]]],[11,"bitand_assign","","",41,[[]]],[11,"bitand_assign","","",41,[[]]],[11,"bitand_assign","","",43,[[]]],[11,"bitand_assign","","",43,[[]]],[11,"bitand_assign","","",44,[[]]],[11,"bitand_assign","","",44,[[]]],[11,"bitand_assign","","",45,[[]]],[11,"bitand_assign","","",45,[[]]],[11,"bitand_assign","","",47,[[]]],[11,"bitand_assign","","",47,[[]]],[11,"bitand_assign","","",51,[[]]],[11,"bitand_assign","","",51,[[]]],[11,"bitand_assign","","",52,[[]]],[11,"bitand_assign","","",52,[[]]],[11,"bitand_assign","","",53,[[]]],[11,"bitand_assign","","",53,[[]]],[11,"bitand_assign","","",54,[[]]],[11,"bitand_assign","","",54,[[]]],[11,"bitand_assign","","",55,[[]]],[11,"bitand_assign","","",55,[[]]],[11,"bitand_assign","","",56,[[]]],[11,"bitand_assign","","",56,[[]]],[11,"bitand_assign","","",57,[[]]],[11,"bitand_assign","","",57,[[]]],[11,"bitand_assign","","",58,[[]]],[11,"bitand_assign","","",58,[[]]],[11,"bitand_assign","","",59,[[]]],[11,"bitand_assign","","",59,[[]]],[11,"bitand_assign","","",60,[[]]],[11,"bitand_assign","","",60,[[]]],[11,"bitand_assign","","",61,[[]]],[11,"bitand_assign","","",61,[[]]],[11,"bitand_assign","","",63,[[]]],[11,"bitand_assign","","",63,[[]]],[11,"bitand_assign","","",64,[[]]],[11,"bitand_assign","","",64,[[]]],[11,"bitand_assign","","",65,[[]]],[11,"bitand_assign","","",65,[[]]],[11,"bitand_assign","","",67,[[]]],[11,"bitand_assign","","",67,[[]]],[11,"bitand_assign","","",68,[[]]],[11,"bitand_assign","","",68,[[]]],[11,"bitand_assign","","",69,[[]]],[11,"bitand_assign","","",69,[[]]],[11,"bitand_assign","","",70,[[]]],[11,"bitand_assign","","",70,[[]]],[11,"bitand_assign","","",74,[[]]],[11,"bitand_assign","","",74,[[]]],[11,"bitand_assign","","",75,[[]]],[11,"bitand_assign","","",75,[[]]],[11,"bitand_assign","","",76,[[]]],[11,"bitand_assign","","",76,[[]]],[11,"bitand_assign","","",77,[[]]],[11,"bitand_assign","","",77,[[]]],[11,"bitand_assign","","",78,[[]]],[11,"bitand_assign","","",78,[[]]],[11,"bitand_assign","","",79,[[]]],[11,"bitand_assign","","",79,[[]]],[11,"bitand_assign","","",80,[[]]],[11,"bitand_assign","","",80,[[]]],[11,"bitand_assign","","",81,[[]]],[11,"bitand_assign","","",81,[[]]],[11,"bitand_assign","","",83,[[]]],[11,"bitand_assign","","",83,[[]]],[11,"bitand_assign","","",84,[[]]],[11,"bitand_assign","","",84,[[]]],[11,"bitand_assign","","",85,[[]]],[11,"bitand_assign","","",85,[[]]],[11,"bitand_assign","","",87,[[]]],[11,"bitand_assign","","",87,[[]]],[11,"bitand_assign","","",88,[[]]],[11,"bitand_assign","","",88,[[]]],[11,"bitand_assign","","",89,[[]]],[11,"bitand_assign","","",89,[[]]],[11,"bitand_assign","","",90,[[]]],[11,"bitand_assign","","",90,[[]]],[11,"bitand_assign","","",48,[[]]],[11,"bitand_assign","","",48,[[]]],[11,"bitand_assign","","",49,[[]]],[11,"bitand_assign","","",49,[[]]],[11,"bitand_assign","","",50,[[]]],[11,"bitand_assign","","",50,[[]]],[11,"bitand_assign","","",71,[[]]],[11,"bitand_assign","","",71,[[]]],[11,"bitand_assign","","",72,[[]]],[11,"bitand_assign","","",72,[[]]],[11,"bitand_assign","","",73,[[]]],[11,"bitand_assign","","",73,[[]]],[11,"bitand_assign","","",91,[[]]],[11,"bitand_assign","","",91,[[]]],[11,"bitand_assign","","",92,[[]]],[11,"bitand_assign","","",92,[[]]],[11,"bitand_assign","","",93,[[]]],[11,"bitand_assign","","",93,[[]]],[11,"bitor_assign","","",15,[[]]],[11,"bitor_assign","","",15,[[]]],[11,"bitor_assign","","",16,[[]]],[11,"bitor_assign","","",16,[[]]],[11,"bitor_assign","","",17,[[]]],[11,"bitor_assign","","",17,[[]]],[11,"bitor_assign","","",18,[[]]],[11,
"bitor_assign","","",18,[[]]],[11,"bitor_assign","","",19,[[]]],[11,"bitor_assign","","",19,[[]]],[11,"bitor_assign","","",20,[[]]],[11,"bitor_assign","","",20,[[]]],[11,"bitor_assign","","",21,[[]]],[11,"bitor_assign","","",21,[[]]],[11,"bitor_assign","","",22,[[]]],[11,"bitor_assign","","",22,[[]]],[11,"bitor_assign","","",23,[[]]],[11,"bitor_assign","","",23,[[]]],[11,"bitor_assign","","",24,[[]]],[11,"bitor_assign","","",24,[[]]],[11,"bitor_assign","","",25,[[]]],[11,"bitor_assign","","",25,[[]]],[11,"bitor_assign","","",26,[[]]],[11,"bitor_assign","","",26,[[]]],[11,"bitor_assign","","",27,[[]]],[11,"bitor_assign","","",27,[[]]],[11,"bitor_assign","","",28,[[]]],[11,"bitor_assign","","",28,[[]]],[11,"bitor_assign","","",29,[[]]],[11,"bitor_assign","","",29,[[]]],[11,"bitor_assign","","",30,[[]]],[11,"bitor_assign","","",30,[[]]],[11,"bitor_assign","","",31,[[]]],[11,"bitor_assign","","",31,[[]]],[11,"bitor_assign","","",33,[[]]],[11,"bitor_assign","","",33,[[]]],[11,"bitor_assign","","",34,[[]]],[11,"bitor_assign","","",34,[[]]],[11,"bitor_assign","","",35,[[]]],[11,"bitor_assign","","",35,[[]]],[11,"bitor_assign","","",36,[[]]],[11,"bitor_assign","","",36,[[]]],[11,"bitor_assign","","",37,[[]]],[11,"bitor_assign","","",37,[[]]],[11,"bitor_assign","","",38,[[]]],[11,"bitor_assign","","",38,[[]]],[11,"bitor_assign","","",39,[[]]],[11,"bitor_assign","","",39,[[]]],[11,"bitor_assign","","",40,[[]]],[11,"bitor_assign","","",40,[[]]],[11,"bitor_assign","","",41,[[]]],[11,"bitor_assign","","",41,[[]]],[11,"bitor_assign","","",43,[[]]],[11,"bitor_assign","","",43,[[]]],[11,"bitor_assign","","",44,[[]]],[11,"bitor_assign","","",44,[[]]],[11,"bitor_assign","","",45,[[]]],[11,"bitor_assign","","",45,[[]]],[11,"bitor_assign","","",47,[[]]],[11,"bitor_assign","","",47,[[]]],[11,"bitor_assign","","",51,[[]]],[11,"bitor_assign","","",51,[[]]],[11,"bitor_assign","","",52,[[]]],[11,"bitor_assign","","",52,[[]]],[11,"bitor_assign","","",53,[[]]],[11,"bitor_assign","","",53,[[]]],[11,"bitor_assign","","",54,[[]]],[11,"bitor_assign","","",54,[[]]],[11,"bitor_assign","","",55,[[]]],[11,"bitor_assign","","",55,[[]]],[11,"bitor_assign","","",56,[[]]],[11,"bitor_assign","","",56,[[]]],[11,"bitor_assign","","",57,[[]]],[11,"bitor_assign","","",57,[[]]],[11,"bitor_assign","","",58,[[]]],[11,"bitor_assign","","",58,[[]]],[11,"bitor_assign","","",59,[[]]],[11,"bitor_assign","","",59,[[]]],[11,"bitor_assign","","",60,[[]]],[11,"bitor_assign","","",60,[[]]],[11,"bitor_assign","","",61,[[]]],[11,"bitor_assign","","",61,[[]]],[11,"bitor_assign","","",63,[[]]],[11,"bitor_assign","","",63,[[]]],[11,"bitor_assign","","",64,[[]]],[11,"bitor_assign","","",64,[[]]],[11,"bitor_assign","","",65,[[]]],[11,"bitor_assign","","",65,[[]]],[11,"bitor_assign","","",67,[[]]],[11,"bitor_assign","","",67,[[]]],[11,"bitor_assign","","",68,[[]]],[11,"bitor_assign","","",68,[[]]],[11,"bitor_assign","","",69,[[]]],[11,"bitor_assign","","",69,[[]]],[11,"bitor_assign","","",70,[[]]],[11,"bitor_assign","","",70,[[]]],[11,"bitor_assign","","",74,[[]]],[11,"bitor_assign","","",74,[[]]],[11,"bitor_assign","","",75,[[]]],[11,"bitor_assign","","",75,[[]]],[11,"bitor_assign","","",76,[[]]],[11,"bitor_assign","","",76,[[]]],[11,"bitor_assign","","",77,[[]]],[11,"bitor_assign","","",77,[[]]],[11,"bitor_assign","","",78,[[]]],[11,"bitor_assign","","",78,[[]]],[11,"bitor_assign","","",79,[[]]],[11,"bitor_assign","","",79,[[]]],[11,"bitor_assign","","",80,[[]]],[11,"bitor_assign","","",80,[[]]],[11,"bitor_assign","","",81,[[]]],[11,"bitor_assign","","
",81,[[]]],[11,"bitor_assign","","",83,[[]]],[11,"bitor_assign","","",83,[[]]],[11,"bitor_assign","","",84,[[]]],[11,"bitor_assign","","",84,[[]]],[11,"bitor_assign","","",85,[[]]],[11,"bitor_assign","","",85,[[]]],[11,"bitor_assign","","",87,[[]]],[11,"bitor_assign","","",87,[[]]],[11,"bitor_assign","","",88,[[]]],[11,"bitor_assign","","",88,[[]]],[11,"bitor_assign","","",89,[[]]],[11,"bitor_assign","","",89,[[]]],[11,"bitor_assign","","",90,[[]]],[11,"bitor_assign","","",90,[[]]],[11,"bitor_assign","","",48,[[]]],[11,"bitor_assign","","",48,[[]]],[11,"bitor_assign","","",49,[[]]],[11,"bitor_assign","","",49,[[]]],[11,"bitor_assign","","",50,[[]]],[11,"bitor_assign","","",50,[[]]],[11,"bitor_assign","","",71,[[]]],[11,"bitor_assign","","",71,[[]]],[11,"bitor_assign","","",72,[[]]],[11,"bitor_assign","","",72,[[]]],[11,"bitor_assign","","",73,[[]]],[11,"bitor_assign","","",73,[[]]],[11,"bitor_assign","","",91,[[]]],[11,"bitor_assign","","",91,[[]]],[11,"bitor_assign","","",92,[[]]],[11,"bitor_assign","","",92,[[]]],[11,"bitor_assign","","",93,[[]]],[11,"bitor_assign","","",93,[[]]],[11,"bitxor_assign","","",15,[[]]],[11,"bitxor_assign","","",15,[[]]],[11,"bitxor_assign","","",16,[[]]],[11,"bitxor_assign","","",16,[[]]],[11,"bitxor_assign","","",17,[[]]],[11,"bitxor_assign","","",17,[[]]],[11,"bitxor_assign","","",18,[[]]],[11,"bitxor_assign","","",18,[[]]],[11,"bitxor_assign","","",19,[[]]],[11,"bitxor_assign","","",19,[[]]],[11,"bitxor_assign","","",20,[[]]],[11,"bitxor_assign","","",20,[[]]],[11,"bitxor_assign","","",21,[[]]],[11,"bitxor_assign","","",21,[[]]],[11,"bitxor_assign","","",22,[[]]],[11,"bitxor_assign","","",22,[[]]],[11,"bitxor_assign","","",23,[[]]],[11,"bitxor_assign","","",23,[[]]],[11,"bitxor_assign","","",24,[[]]],[11,"bitxor_assign","","",24,[[]]],[11,"bitxor_assign","","",25,[[]]],[11,"bitxor_assign","","",25,[[]]],[11,"bitxor_assign","","",26,[[]]],[11,"bitxor_assign","","",26,[[]]],[11,"bitxor_assign","","",27,[[]]],[11,"bitxor_assign","","",27,[[]]],[11,"bitxor_assign","","",28,[[]]],[11,"bitxor_assign","","",28,[[]]],[11,"bitxor_assign","","",29,[[]]],[11,"bitxor_assign","","",29,[[]]],[11,"bitxor_assign","","",30,[[]]],[11,"bitxor_assign","","",30,[[]]],[11,"bitxor_assign","","",31,[[]]],[11,"bitxor_assign","","",31,[[]]],[11,"bitxor_assign","","",33,[[]]],[11,"bitxor_assign","","",33,[[]]],[11,"bitxor_assign","","",34,[[]]],[11,"bitxor_assign","","",34,[[]]],[11,"bitxor_assign","","",35,[[]]],[11,"bitxor_assign","","",35,[[]]],[11,"bitxor_assign","","",36,[[]]],[11,"bitxor_assign","","",36,[[]]],[11,"bitxor_assign","","",37,[[]]],[11,"bitxor_assign","","",37,[[]]],[11,"bitxor_assign","","",38,[[]]],[11,"bitxor_assign","","",38,[[]]],[11,"bitxor_assign","","",39,[[]]],[11,"bitxor_assign","","",39,[[]]],[11,"bitxor_assign","","",40,[[]]],[11,"bitxor_assign","","",40,[[]]],[11,"bitxor_assign","","",41,[[]]],[11,"bitxor_assign","","",41,[[]]],[11,"bitxor_assign","","",43,[[]]],[11,"bitxor_assign","","",43,[[]]],[11,"bitxor_assign","","",44,[[]]],[11,"bitxor_assign","","",44,[[]]],[11,"bitxor_assign","","",45,[[]]],[11,"bitxor_assign","","",45,[[]]],[11,"bitxor_assign","","",47,[[]]],[11,"bitxor_assign","","",47,[[]]],[11,"bitxor_assign","","",51,[[]]],[11,"bitxor_assign","","",51,[[]]],[11,"bitxor_assign","","",52,[[]]],[11,"bitxor_assign","","",52,[[]]],[11,"bitxor_assign","","",53,[[]]],[11,"bitxor_assign","","",53,[[]]],[11,"bitxor_assign","","",54,[[]]],[11,"bitxor_assign","","",54,[[]]],[11,"bitxor_assign","","",55,[[]]],[11,"bitxor_assign","","",55,[[]]],[11,"b
itxor_assign","","",56,[[]]],[11,"bitxor_assign","","",56,[[]]],[11,"bitxor_assign","","",57,[[]]],[11,"bitxor_assign","","",57,[[]]],[11,"bitxor_assign","","",58,[[]]],[11,"bitxor_assign","","",58,[[]]],[11,"bitxor_assign","","",59,[[]]],[11,"bitxor_assign","","",59,[[]]],[11,"bitxor_assign","","",60,[[]]],[11,"bitxor_assign","","",60,[[]]],[11,"bitxor_assign","","",61,[[]]],[11,"bitxor_assign","","",61,[[]]],[11,"bitxor_assign","","",63,[[]]],[11,"bitxor_assign","","",63,[[]]],[11,"bitxor_assign","","",64,[[]]],[11,"bitxor_assign","","",64,[[]]],[11,"bitxor_assign","","",65,[[]]],[11,"bitxor_assign","","",65,[[]]],[11,"bitxor_assign","","",67,[[]]],[11,"bitxor_assign","","",67,[[]]],[11,"bitxor_assign","","",68,[[]]],[11,"bitxor_assign","","",68,[[]]],[11,"bitxor_assign","","",69,[[]]],[11,"bitxor_assign","","",69,[[]]],[11,"bitxor_assign","","",70,[[]]],[11,"bitxor_assign","","",70,[[]]],[11,"bitxor_assign","","",74,[[]]],[11,"bitxor_assign","","",74,[[]]],[11,"bitxor_assign","","",75,[[]]],[11,"bitxor_assign","","",75,[[]]],[11,"bitxor_assign","","",76,[[]]],[11,"bitxor_assign","","",76,[[]]],[11,"bitxor_assign","","",77,[[]]],[11,"bitxor_assign","","",77,[[]]],[11,"bitxor_assign","","",78,[[]]],[11,"bitxor_assign","","",78,[[]]],[11,"bitxor_assign","","",79,[[]]],[11,"bitxor_assign","","",79,[[]]],[11,"bitxor_assign","","",80,[[]]],[11,"bitxor_assign","","",80,[[]]],[11,"bitxor_assign","","",81,[[]]],[11,"bitxor_assign","","",81,[[]]],[11,"bitxor_assign","","",83,[[]]],[11,"bitxor_assign","","",83,[[]]],[11,"bitxor_assign","","",84,[[]]],[11,"bitxor_assign","","",84,[[]]],[11,"bitxor_assign","","",85,[[]]],[11,"bitxor_assign","","",85,[[]]],[11,"bitxor_assign","","",87,[[]]],[11,"bitxor_assign","","",87,[[]]],[11,"bitxor_assign","","",88,[[]]],[11,"bitxor_assign","","",88,[[]]],[11,"bitxor_assign","","",89,[[]]],[11,"bitxor_assign","","",89,[[]]],[11,"bitxor_assign","","",90,[[]]],[11,"bitxor_assign","","",90,[[]]],[11,"bitxor_assign","","",48,[[]]],[11,"bitxor_assign","","",48,[[]]],[11,"bitxor_assign","","",49,[[]]],[11,"bitxor_assign","","",49,[[]]],[11,"bitxor_assign","","",50,[[]]],[11,"bitxor_assign","","",50,[[]]],[11,"bitxor_assign","","",71,[[]]],[11,"bitxor_assign","","",71,[[]]],[11,"bitxor_assign","","",72,[[]]],[11,"bitxor_assign","","",72,[[]]],[11,"bitxor_assign","","",73,[[]]],[11,"bitxor_assign","","",73,[[]]],[11,"bitxor_assign","","",91,[[]]],[11,"bitxor_assign","","",91,[[]]],[11,"bitxor_assign","","",92,[[]]],[11,"bitxor_assign","","",92,[[]]],[11,"bitxor_assign","","",93,[[]]],[11,"bitxor_assign","","",93,[[]]],[11,"shl_assign","","",15,[[]]],[11,"shl_assign","","",15,[[]]],[11,"shl_assign","","",16,[[]]],[11,"shl_assign","","",16,[[]]],[11,"shl_assign","","",18,[[]]],[11,"shl_assign","","",18,[[]]],[11,"shl_assign","","",19,[[]]],[11,"shl_assign","","",19,[[]]],[11,"shl_assign","","",21,[[]]],[11,"shl_assign","","",21,[[]]],[11,"shl_assign","","",22,[[]]],[11,"shl_assign","","",22,[[]]],[11,"shl_assign","","",24,[[]]],[11,"shl_assign","","",24,[[]]],[11,"shl_assign","","",25,[[]]],[11,"shl_assign","","",25,[[]]],[11,"shl_assign","","",27,[[]]],[11,"shl_assign","","",27,[[]]],[11,"shl_assign","","",28,[[]]],[11,"shl_assign","","",28,[[]]],[11,"shl_assign","","",30,[[]]],[11,"shl_assign","","",30,[[]]],[11,"shl_assign","","",31,[[]]],[11,"shl_assign","","",31,[[]]],[11,"shl_assign","","",34,[[]]],[11,"shl_assign","","",34,[[]]],[11,"shl_assign","","",35,[[]]],[11,"shl_assign","","",35,[[]]],[11,"shl_assign","","",37,[[]]],[11,"shl_assign","","",37,[[]]],[11,"shl_as
sign","","",38,[[]]],[11,"shl_assign","","",38,[[]]],[11,"shl_assign","","",40,[[]]],[11,"shl_assign","","",40,[[]]],[11,"shl_assign","","",41,[[]]],[11,"shl_assign","","",41,[[]]],[11,"shl_assign","","",44,[[]]],[11,"shl_assign","","",44,[[]]],[11,"shl_assign","","",45,[[]]],[11,"shl_assign","","",45,[[]]],[11,"shl_assign","","",51,[[]]],[11,"shl_assign","","",51,[[]]],[11,"shl_assign","","",52,[[]]],[11,"shl_assign","","",52,[[]]],[11,"shl_assign","","",54,[[]]],[11,"shl_assign","","",54,[[]]],[11,"shl_assign","","",55,[[]]],[11,"shl_assign","","",55,[[]]],[11,"shl_assign","","",57,[[]]],[11,"shl_assign","","",57,[[]]],[11,"shl_assign","","",58,[[]]],[11,"shl_assign","","",58,[[]]],[11,"shl_assign","","",60,[[]]],[11,"shl_assign","","",60,[[]]],[11,"shl_assign","","",61,[[]]],[11,"shl_assign","","",61,[[]]],[11,"shl_assign","","",64,[[]]],[11,"shl_assign","","",64,[[]]],[11,"shl_assign","","",65,[[]]],[11,"shl_assign","","",65,[[]]],[11,"shl_assign","","",68,[[]]],[11,"shl_assign","","",68,[[]]],[11,"shl_assign","","",69,[[]]],[11,"shl_assign","","",69,[[]]],[11,"shl_assign","","",74,[[]]],[11,"shl_assign","","",74,[[]]],[11,"shl_assign","","",75,[[]]],[11,"shl_assign","","",75,[[]]],[11,"shl_assign","","",77,[[]]],[11,"shl_assign","","",77,[[]]],[11,"shl_assign","","",78,[[]]],[11,"shl_assign","","",78,[[]]],[11,"shl_assign","","",80,[[]]],[11,"shl_assign","","",80,[[]]],[11,"shl_assign","","",81,[[]]],[11,"shl_assign","","",81,[[]]],[11,"shl_assign","","",84,[[]]],[11,"shl_assign","","",84,[[]]],[11,"shl_assign","","",85,[[]]],[11,"shl_assign","","",85,[[]]],[11,"shl_assign","","",88,[[]]],[11,"shl_assign","","",88,[[]]],[11,"shl_assign","","",89,[[]]],[11,"shl_assign","","",89,[[]]],[11,"shl_assign","","",48,[[]]],[11,"shl_assign","","",48,[[]]],[11,"shl_assign","","",49,[[]]],[11,"shl_assign","","",49,[[]]],[11,"shl_assign","","",71,[[]]],[11,"shl_assign","","",71,[[]]],[11,"shl_assign","","",72,[[]]],[11,"shl_assign","","",72,[[]]],[11,"shl_assign","","",91,[[]]],[11,"shl_assign","","",91,[[]]],[11,"shl_assign","","",92,[[]]],[11,"shl_assign","","",92,[[]]],[11,"shr_assign","","",15,[[]]],[11,"shr_assign","","",15,[[]]],[11,"shr_assign","","",16,[[]]],[11,"shr_assign","","",16,[[]]],[11,"shr_assign","","",18,[[]]],[11,"shr_assign","","",18,[[]]],[11,"shr_assign","","",19,[[]]],[11,"shr_assign","","",19,[[]]],[11,"shr_assign","","",21,[[]]],[11,"shr_assign","","",21,[[]]],[11,"shr_assign","","",22,[[]]],[11,"shr_assign","","",22,[[]]],[11,"shr_assign","","",24,[[]]],[11,"shr_assign","","",24,[[]]],[11,"shr_assign","","",25,[[]]],[11,"shr_assign","","",25,[[]]],[11,"shr_assign","","",27,[[]]],[11,"shr_assign","","",27,[[]]],[11,"shr_assign","","",28,[[]]],[11,"shr_assign","","",28,[[]]],[11,"shr_assign","","",30,[[]]],[11,"shr_assign","","",30,[[]]],[11,"shr_assign","","",31,[[]]],[11,"shr_assign","","",31,[[]]],[11,"shr_assign","","",34,[[]]],[11,"shr_assign","","",34,[[]]],[11,"shr_assign","","",35,[[]]],[11,"shr_assign","","",35,[[]]],[11,"shr_assign","","",37,[[]]],[11,"shr_assign","","",37,[[]]],[11,"shr_assign","","",38,[[]]],[11,"shr_assign","","",38,[[]]],[11,"shr_assign","","",40,[[]]],[11,"shr_assign","","",40,[[]]],[11,"shr_assign","","",41,[[]]],[11,"shr_assign","","",41,[[]]],[11,"shr_assign","","",44,[[]]],[11,"shr_assign","","",44,[[]]],[11,"shr_assign","","",45,[[]]],[11,"shr_assign","","",45,[[]]],[11,"shr_assign","","",51,[[]]],[11,"shr_assign","","",51,[[]]],[11,"shr_assign","","",52,[[]]],[11,"shr_assign","","",52,[[]]],[11,"shr_assign","","",54,[[]]],[11,"shr_assig
n","","",54,[[]]],[11,"shr_assign","","",55,[[]]],[11,"shr_assign","","",55,[[]]],[11,"shr_assign","","",57,[[]]],[11,"shr_assign","","",57,[[]]],[11,"shr_assign","","",58,[[]]],[11,"shr_assign","","",58,[[]]],[11,"shr_assign","","",60,[[]]],[11,"shr_assign","","",60,[[]]],[11,"shr_assign","","",61,[[]]],[11,"shr_assign","","",61,[[]]],[11,"shr_assign","","",64,[[]]],[11,"shr_assign","","",64,[[]]],[11,"shr_assign","","",65,[[]]],[11,"shr_assign","","",65,[[]]],[11,"shr_assign","","",68,[[]]],[11,"shr_assign","","",68,[[]]],[11,"shr_assign","","",69,[[]]],[11,"shr_assign","","",69,[[]]],[11,"shr_assign","","",74,[[]]],[11,"shr_assign","","",74,[[]]],[11,"shr_assign","","",75,[[]]],[11,"shr_assign","","",75,[[]]],[11,"shr_assign","","",77,[[]]],[11,"shr_assign","","",77,[[]]],[11,"shr_assign","","",78,[[]]],[11,"shr_assign","","",78,[[]]],[11,"shr_assign","","",80,[[]]],[11,"shr_assign","","",80,[[]]],[11,"shr_assign","","",81,[[]]],[11,"shr_assign","","",81,[[]]],[11,"shr_assign","","",84,[[]]],[11,"shr_assign","","",84,[[]]],[11,"shr_assign","","",85,[[]]],[11,"shr_assign","","",85,[[]]],[11,"shr_assign","","",88,[[]]],[11,"shr_assign","","",88,[[]]],[11,"shr_assign","","",89,[[]]],[11,"shr_assign","","",89,[[]]],[11,"shr_assign","","",48,[[]]],[11,"shr_assign","","",48,[[]]],[11,"shr_assign","","",49,[[]]],[11,"shr_assign","","",49,[[]]],[11,"shr_assign","","",71,[[]]],[11,"shr_assign","","",71,[[]]],[11,"shr_assign","","",72,[[]]],[11,"shr_assign","","",72,[[]]],[11,"shr_assign","","",91,[[]]],[11,"shr_assign","","",91,[[]]],[11,"shr_assign","","",92,[[]]],[11,"shr_assign","","",92,[[]]],[11,"hash","","",0,[[]]],[11,"hash","","",1,[[]]],[11,"hash","","",2,[[]]],[11,"hash","","",3,[[]]],[11,"hash","","",4,[[]]],[11,"hash","","",5,[[]]],[11,"hash","","",15,[[]]],[11,"hash","","",16,[[]]],[11,"hash","","",18,[[]]],[11,"hash","","",19,[[]]],[11,"hash","","",21,[[]]],[11,"hash","","",22,[[]]],[11,"hash","","",24,[[]]],[11,"hash","","",25,[[]]],[11,"hash","","",27,[[]]],[11,"hash","","",28,[[]]],[11,"hash","","",30,[[]]],[11,"hash","","",31,[[]]],[11,"hash","","",34,[[]]],[11,"hash","","",35,[[]]],[11,"hash","","",37,[[]]],[11,"hash","","",38,[[]]],[11,"hash","","",40,[[]]],[11,"hash","","",41,[[]]],[11,"hash","","",44,[[]]],[11,"hash","","",45,[[]]],[11,"hash","","",51,[[]]],[11,"hash","","",52,[[]]],[11,"hash","","",54,[[]]],[11,"hash","","",55,[[]]],[11,"hash","","",57,[[]]],[11,"hash","","",58,[[]]],[11,"hash","","",60,[[]]],[11,"hash","","",61,[[]]],[11,"hash","","",64,[[]]],[11,"hash","","",65,[[]]],[11,"hash","","",68,[[]]],[11,"hash","","",69,[[]]],[11,"hash","","",74,[[]]],[11,"hash","","",75,[[]]],[11,"hash","","",77,[[]]],[11,"hash","","",78,[[]]],[11,"hash","","",80,[[]]],[11,"hash","","",81,[[]]],[11,"hash","","",84,[[]]],[11,"hash","","",85,[[]]],[11,"hash","","",88,[[]]],[11,"hash","","",89,[[]]],[11,"hash","","",48,[[]]],[11,"hash","","",49,[[]]],[11,"hash","","",71,[[]]],[11,"hash","","",72,[[]]],[11,"hash","","",91,[[]]],[11,"hash","","",92,[[]]],[11,"hash","","",94,[[]]],[11,"hash","","",95,[[]]],[11,"hash","","",96,[[]]],[11,"hash","","",97,[[]]],[11,"hash","","",98,[[]]],[11,"hash","","",99,[[]]],[11,"product","","",15,[[["iterator",8]],["i8x2",6]]],[11,"product","","",15,[[["iterator",8]],["i8x2",6]]],[11,"product","","",16,[[["iterator",8]],["u8x2",6]]],[11,"product","","",16,[[["iterator",8]],["u8x2",6]]],[11,"product","","",18,[[["iterator",8]],["i8x4",6]]],[11,"product","","",18,[[["iterator",8]],["i8x4",6]]],[11,"product","","",19,[[["iterator",8]],["u8x4",6]]],[11,"
product","","",19,[[["iterator",8]],["u8x4",6]]],[11,"product","","",21,[[["iterator",8]],["i16x2",6]]],[11,"product","","",21,[[["iterator",8]],["i16x2",6]]],[11,"product","","",22,[[["iterator",8]],["u16x2",6]]],[11,"product","","",22,[[["iterator",8]],["u16x2",6]]],[11,"product","","",24,[[["iterator",8]],["i8x8",6]]],[11,"product","","",24,[[["iterator",8]],["i8x8",6]]],[11,"product","","",25,[[["iterator",8]],["u8x8",6]]],[11,"product","","",25,[[["iterator",8]],["u8x8",6]]],[11,"product","","",27,[[["iterator",8]],["i16x4",6]]],[11,"product","","",27,[[["iterator",8]],["i16x4",6]]],[11,"product","","",28,[[["iterator",8]],["u16x4",6]]],[11,"product","","",28,[[["iterator",8]],["u16x4",6]]],[11,"product","","",30,[[["iterator",8]],["i32x2",6]]],[11,"product","","",30,[[["iterator",8]],["i32x2",6]]],[11,"product","","",31,[[["iterator",8]],["u32x2",6]]],[11,"product","","",31,[[["iterator",8]],["u32x2",6]]],[11,"product","","",32,[[["iterator",8]],["f32x2",6]]],[11,"product","","",32,[[["iterator",8]],["f32x2",6]]],[11,"product","","",34,[[["iterator",8]],["i8x16",6]]],[11,"product","","",34,[[["iterator",8]],["i8x16",6]]],[11,"product","","",35,[[["iterator",8]],["u8x16",6]]],[11,"product","","",35,[[["iterator",8]],["u8x16",6]]],[11,"product","","",37,[[["iterator",8]],["i16x8",6]]],[11,"product","","",37,[[["iterator",8]],["i16x8",6]]],[11,"product","","",38,[[["iterator",8]],["u16x8",6]]],[11,"product","","",38,[[["iterator",8]],["u16x8",6]]],[11,"product","","",40,[[["iterator",8]],["i32x4",6]]],[11,"product","","",40,[[["iterator",8]],["i32x4",6]]],[11,"product","","",41,[[["iterator",8]],["u32x4",6]]],[11,"product","","",41,[[["iterator",8]],["u32x4",6]]],[11,"product","","",42,[[["iterator",8]],["f32x4",6]]],[11,"product","","",42,[[["iterator",8]],["f32x4",6]]],[11,"product","","",44,[[["iterator",8]],["i64x2",6]]],[11,"product","","",44,[[["iterator",8]],["i64x2",6]]],[11,"product","","",45,[[["iterator",8]],["u64x2",6]]],[11,"product","","",45,[[["iterator",8]],["u64x2",6]]],[11,"product","","",46,[[["iterator",8]],["f64x2",6]]],[11,"product","","",46,[[["iterator",8]],["f64x2",6]]],[11,"product","","",51,[[["iterator",8]],["i128x1",6]]],[11,"product","","",51,[[["iterator",8]],["i128x1",6]]],[11,"product","","",52,[[["iterator",8]],["u128x1",6]]],[11,"product","","",52,[[["iterator",8]],["u128x1",6]]],[11,"product","","",54,[[["iterator",8]],["i8x32",6]]],[11,"product","","",54,[[["iterator",8]],["i8x32",6]]],[11,"product","","",55,[[["iterator",8]],["u8x32",6]]],[11,"product","","",55,[[["iterator",8]],["u8x32",6]]],[11,"product","","",57,[[["iterator",8]],["i16x16",6]]],[11,"product","","",57,[[["iterator",8]],["i16x16",6]]],[11,"product","","",58,[[["iterator",8]],["u16x16",6]]],[11,"product","","",58,[[["iterator",8]],["u16x16",6]]],[11,"product","","",60,[[["iterator",8]],["i32x8",6]]],[11,"product","","",60,[[["iterator",8]],["i32x8",6]]],[11,"product","","",61,[[["iterator",8]],["u32x8",6]]],[11,"product","","",61,[[["iterator",8]],["u32x8",6]]],[11,"product","","",62,[[["iterator",8]],["f32x8",6]]],[11,"product","","",62,[[["iterator",8]],["f32x8",6]]],[11,"product","","",64,[[["iterator",8]],["i64x4",6]]],[11,"product","","",64,[[["iterator",8]],["i64x4",6]]],[11,"product","","",65,[[["iterator",8]],["u64x4",6]]],[11,"product","","",65,[[["iterator",8]],["u64x4",6]]],[11,"product","","",66,[[["iterator",8]],["f64x4",6]]],[11,"product","","",66,[[["iterator",8]],["f64x4",6]]],[11,"product","","",68,[[["iterator",8]],["i128x2",6]]],[11,"product","","",68,[[["iterator"
,8]],["i128x2",6]]],[11,"product","","",69,[[["iterator",8]],["u128x2",6]]],[11,"product","","",69,[[["iterator",8]],["u128x2",6]]],[11,"product","","",74,[[["iterator",8]],["i8x64",6]]],[11,"product","","",74,[[["iterator",8]],["i8x64",6]]],[11,"product","","",75,[[["iterator",8]],["u8x64",6]]],[11,"product","","",75,[[["iterator",8]],["u8x64",6]]],[11,"product","","",77,[[["iterator",8]],["i16x32",6]]],[11,"product","","",77,[[["iterator",8]],["i16x32",6]]],[11,"product","","",78,[[["iterator",8]],["u16x32",6]]],[11,"product","","",78,[[["iterator",8]],["u16x32",6]]],[11,"product","","",80,[[["iterator",8]],["i32x16",6]]],[11,"product","","",80,[[["iterator",8]],["i32x16",6]]],[11,"product","","",81,[[["iterator",8]],["u32x16",6]]],[11,"product","","",81,[[["iterator",8]],["u32x16",6]]],[11,"product","","",82,[[["iterator",8]],["f32x16",6]]],[11,"product","","",82,[[["iterator",8]],["f32x16",6]]],[11,"product","","",84,[[["iterator",8]],["i64x8",6]]],[11,"product","","",84,[[["iterator",8]],["i64x8",6]]],[11,"product","","",85,[[["iterator",8]],["u64x8",6]]],[11,"product","","",85,[[["iterator",8]],["u64x8",6]]],[11,"product","","",86,[[["iterator",8]],["f64x8",6]]],[11,"product","","",86,[[["iterator",8]],["f64x8",6]]],[11,"product","","",88,[[["iterator",8]],["i128x4",6]]],[11,"product","","",88,[[["iterator",8]],["i128x4",6]]],[11,"product","","",89,[[["iterator",8]],["u128x4",6]]],[11,"product","","",89,[[["iterator",8]],["u128x4",6]]],[11,"product","","",48,[[["iterator",8]],["isizex2",6]]],[11,"product","","",48,[[["iterator",8]],["isizex2",6]]],[11,"product","","",49,[[["iterator",8]],["usizex2",6]]],[11,"product","","",49,[[["iterator",8]],["usizex2",6]]],[11,"product","","",71,[[["iterator",8]],["isizex4",6]]],[11,"product","","",71,[[["iterator",8]],["isizex4",6]]],[11,"product","","",72,[[["iterator",8]],["usizex4",6]]],[11,"product","","",72,[[["iterator",8]],["usizex4",6]]],[11,"product","","",91,[[["iterator",8]],["isizex8",6]]],[11,"product","","",91,[[["iterator",8]],["isizex8",6]]],[11,"product","","",92,[[["iterator",8]],["usizex8",6]]],[11,"product","","",92,[[["iterator",8]],["usizex8",6]]],[11,"sum","","",15,[[["iterator",8]],["i8x2",6]]],[11,"sum","","",15,[[["iterator",8]],["i8x2",6]]],[11,"sum","","",16,[[["iterator",8]],["u8x2",6]]],[11,"sum","","",16,[[["iterator",8]],["u8x2",6]]],[11,"sum","","",18,[[["iterator",8]],["i8x4",6]]],[11,"sum","","",18,[[["iterator",8]],["i8x4",6]]],[11,"sum","","",19,[[["iterator",8]],["u8x4",6]]],[11,"sum","","",19,[[["iterator",8]],["u8x4",6]]],[11,"sum","","",21,[[["iterator",8]],["i16x2",6]]],[11,"sum","","",21,[[["iterator",8]],["i16x2",6]]],[11,"sum","","",22,[[["iterator",8]],["u16x2",6]]],[11,"sum","","",22,[[["iterator",8]],["u16x2",6]]],[11,"sum","","",24,[[["iterator",8]],["i8x8",6]]],[11,"sum","","",24,[[["iterator",8]],["i8x8",6]]],[11,"sum","","",25,[[["iterator",8]],["u8x8",6]]],[11,"sum","","",25,[[["iterator",8]],["u8x8",6]]],[11,"sum","","",27,[[["iterator",8]],["i16x4",6]]],[11,"sum","","",27,[[["iterator",8]],["i16x4",6]]],[11,"sum","","",28,[[["iterator",8]],["u16x4",6]]],[11,"sum","","",28,[[["iterator",8]],["u16x4",6]]],[11,"sum","","",30,[[["iterator",8]],["i32x2",6]]],[11,"sum","","",30,[[["iterator",8]],["i32x2",6]]],[11,"sum","","",31,[[["iterator",8]],["u32x2",6]]],[11,"sum","","",31,[[["iterator",8]],["u32x2",6]]],[11,"sum","","",32,[[["iterator",8]],["f32x2",6]]],[11,"sum","","",32,[[["iterator",8]],["f32x2",6]]],[11,"sum","","",34,[[["iterator",8]],["i8x16",6]]],[11,"sum","","",34,[[["iterator",8]],["i
8x16",6]]],[11,"sum","","",35,[[["iterator",8]],["u8x16",6]]],[11,"sum","","",35,[[["iterator",8]],["u8x16",6]]],[11,"sum","","",37,[[["iterator",8]],["i16x8",6]]],[11,"sum","","",37,[[["iterator",8]],["i16x8",6]]],[11,"sum","","",38,[[["iterator",8]],["u16x8",6]]],[11,"sum","","",38,[[["iterator",8]],["u16x8",6]]],[11,"sum","","",40,[[["iterator",8]],["i32x4",6]]],[11,"sum","","",40,[[["iterator",8]],["i32x4",6]]],[11,"sum","","",41,[[["iterator",8]],["u32x4",6]]],[11,"sum","","",41,[[["iterator",8]],["u32x4",6]]],[11,"sum","","",42,[[["iterator",8]],["f32x4",6]]],[11,"sum","","",42,[[["iterator",8]],["f32x4",6]]],[11,"sum","","",44,[[["iterator",8]],["i64x2",6]]],[11,"sum","","",44,[[["iterator",8]],["i64x2",6]]],[11,"sum","","",45,[[["iterator",8]],["u64x2",6]]],[11,"sum","","",45,[[["iterator",8]],["u64x2",6]]],[11,"sum","","",46,[[["iterator",8]],["f64x2",6]]],[11,"sum","","",46,[[["iterator",8]],["f64x2",6]]],[11,"sum","","",51,[[["iterator",8]],["i128x1",6]]],[11,"sum","","",51,[[["iterator",8]],["i128x1",6]]],[11,"sum","","",52,[[["iterator",8]],["u128x1",6]]],[11,"sum","","",52,[[["iterator",8]],["u128x1",6]]],[11,"sum","","",54,[[["iterator",8]],["i8x32",6]]],[11,"sum","","",54,[[["iterator",8]],["i8x32",6]]],[11,"sum","","",55,[[["iterator",8]],["u8x32",6]]],[11,"sum","","",55,[[["iterator",8]],["u8x32",6]]],[11,"sum","","",57,[[["iterator",8]],["i16x16",6]]],[11,"sum","","",57,[[["iterator",8]],["i16x16",6]]],[11,"sum","","",58,[[["iterator",8]],["u16x16",6]]],[11,"sum","","",58,[[["iterator",8]],["u16x16",6]]],[11,"sum","","",60,[[["iterator",8]],["i32x8",6]]],[11,"sum","","",60,[[["iterator",8]],["i32x8",6]]],[11,"sum","","",61,[[["iterator",8]],["u32x8",6]]],[11,"sum","","",61,[[["iterator",8]],["u32x8",6]]],[11,"sum","","",62,[[["iterator",8]],["f32x8",6]]],[11,"sum","","",62,[[["iterator",8]],["f32x8",6]]],[11,"sum","","",64,[[["iterator",8]],["i64x4",6]]],[11,"sum","","",64,[[["iterator",8]],["i64x4",6]]],[11,"sum","","",65,[[["iterator",8]],["u64x4",6]]],[11,"sum","","",65,[[["iterator",8]],["u64x4",6]]],[11,"sum","","",66,[[["iterator",8]],["f64x4",6]]],[11,"sum","","",66,[[["iterator",8]],["f64x4",6]]],[11,"sum","","",68,[[["iterator",8]],["i128x2",6]]],[11,"sum","","",68,[[["iterator",8]],["i128x2",6]]],[11,"sum","","",69,[[["iterator",8]],["u128x2",6]]],[11,"sum","","",69,[[["iterator",8]],["u128x2",6]]],[11,"sum","","",74,[[["iterator",8]],["i8x64",6]]],[11,"sum","","",74,[[["iterator",8]],["i8x64",6]]],[11,"sum","","",75,[[["iterator",8]],["u8x64",6]]],[11,"sum","","",75,[[["iterator",8]],["u8x64",6]]],[11,"sum","","",77,[[["iterator",8]],["i16x32",6]]],[11,"sum","","",77,[[["iterator",8]],["i16x32",6]]],[11,"sum","","",78,[[["iterator",8]],["u16x32",6]]],[11,"sum","","",78,[[["iterator",8]],["u16x32",6]]],[11,"sum","","",80,[[["iterator",8]],["i32x16",6]]],[11,"sum","","",80,[[["iterator",8]],["i32x16",6]]],[11,"sum","","",81,[[["iterator",8]],["u32x16",6]]],[11,"sum","","",81,[[["iterator",8]],["u32x16",6]]],[11,"sum","","",82,[[["iterator",8]],["f32x16",6]]],[11,"sum","","",82,[[["iterator",8]],["f32x16",6]]],[11,"sum","","",84,[[["iterator",8]],["i64x8",6]]],[11,"sum","","",84,[[["iterator",8]],["i64x8",6]]],[11,"sum","","",85,[[["iterator",8]],["u64x8",6]]],[11,"sum","","",85,[[["iterator",8]],["u64x8",6]]],[11,"sum","","",86,[[["iterator",8]],["f64x8",6]]],[11,"sum","","",86,[[["iterator",8]],["f64x8",6]]],[11,"sum","","",88,[[["iterator",8]],["i128x4",6]]],[11,"sum","","",88,[[["iterator",8]],["i128x4",6]]],[11,"sum","","",89,[[["iterator",8]],["u128x4",6]]],
[11,"sum","","",89,[[["iterator",8]],["u128x4",6]]],[11,"sum","","",48,[[["iterator",8]],["isizex2",6]]],[11,"sum","","",48,[[["iterator",8]],["isizex2",6]]],[11,"sum","","",49,[[["iterator",8]],["usizex2",6]]],[11,"sum","","",49,[[["iterator",8]],["usizex2",6]]],[11,"sum","","",71,[[["iterator",8]],["isizex4",6]]],[11,"sum","","",71,[[["iterator",8]],["isizex4",6]]],[11,"sum","","",72,[[["iterator",8]],["usizex4",6]]],[11,"sum","","",72,[[["iterator",8]],["usizex4",6]]],[11,"sum","","",91,[[["iterator",8]],["isizex8",6]]],[11,"sum","","",91,[[["iterator",8]],["isizex8",6]]],[11,"sum","","",92,[[["iterator",8]],["usizex8",6]]],[11,"sum","","",92,[[["iterator",8]],["usizex8",6]]],[11,"into","","",94,[[]]],[11,"into","","",95,[[]]],[11,"into","","",96,[[]]],[11,"into","","",97,[[]]],[11,"into","","",98,[[]]],[11,"into","","",99,[[]]],[11,"fmt","","",15,[[["formatter",3]],["result",6]]],[11,"fmt","","",16,[[["formatter",3]],["result",6]]],[11,"fmt","","",18,[[["formatter",3]],["result",6]]],[11,"fmt","","",19,[[["formatter",3]],["result",6]]],[11,"fmt","","",21,[[["formatter",3]],["result",6]]],[11,"fmt","","",22,[[["formatter",3]],["result",6]]],[11,"fmt","","",24,[[["formatter",3]],["result",6]]],[11,"fmt","","",25,[[["formatter",3]],["result",6]]],[11,"fmt","","",27,[[["formatter",3]],["result",6]]],[11,"fmt","","",28,[[["formatter",3]],["result",6]]],[11,"fmt","","",30,[[["formatter",3]],["result",6]]],[11,"fmt","","",31,[[["formatter",3]],["result",6]]],[11,"fmt","","",34,[[["formatter",3]],["result",6]]],[11,"fmt","","",35,[[["formatter",3]],["result",6]]],[11,"fmt","","",37,[[["formatter",3]],["result",6]]],[11,"fmt","","",38,[[["formatter",3]],["result",6]]],[11,"fmt","","",40,[[["formatter",3]],["result",6]]],[11,"fmt","","",41,[[["formatter",3]],["result",6]]],[11,"fmt","","",44,[[["formatter",3]],["result",6]]],[11,"fmt","","",45,[[["formatter",3]],["result",6]]],[11,"fmt","","",51,[[["formatter",3]],["result",6]]],[11,"fmt","","",52,[[["formatter",3]],["result",6]]],[11,"fmt","","",54,[[["formatter",3]],["result",6]]],[11,"fmt","","",55,[[["formatter",3]],["result",6]]],[11,"fmt","","",57,[[["formatter",3]],["result",6]]],[11,"fmt","","",58,[[["formatter",3]],["result",6]]],[11,"fmt","","",60,[[["formatter",3]],["result",6]]],[11,"fmt","","",61,[[["formatter",3]],["result",6]]],[11,"fmt","","",64,[[["formatter",3]],["result",6]]],[11,"fmt","","",65,[[["formatter",3]],["result",6]]],[11,"fmt","","",68,[[["formatter",3]],["result",6]]],[11,"fmt","","",69,[[["formatter",3]],["result",6]]],[11,"fmt","","",74,[[["formatter",3]],["result",6]]],[11,"fmt","","",75,[[["formatter",3]],["result",6]]],[11,"fmt","","",77,[[["formatter",3]],["result",6]]],[11,"fmt","","",78,[[["formatter",3]],["result",6]]],[11,"fmt","","",80,[[["formatter",3]],["result",6]]],[11,"fmt","","",81,[[["formatter",3]],["result",6]]],[11,"fmt","","",84,[[["formatter",3]],["result",6]]],[11,"fmt","","",85,[[["formatter",3]],["result",6]]],[11,"fmt","","",88,[[["formatter",3]],["result",6]]],[11,"fmt","","",89,[[["formatter",3]],["result",6]]],[11,"fmt","","",48,[[["formatter",3]],["result",6]]],[11,"fmt","","",49,[[["formatter",3]],["result",6]]],[11,"fmt","","",71,[[["formatter",3]],["result",6]]],[11,"fmt","","",72,[[["formatter",3]],["result",6]]],[11,"fmt","","",91,[[["formatter",3]],["result",6]]],[11,"fmt","","",92,[[["formatter",3]],["result",6]]],[11,"fmt","","",15,[[["formatter",3]],["result",6]]],[11,"fmt","","",16,[[["formatter",3]],["result",6]]],[11,"fmt","","",18,[[["formatter",3]],["result",6]]],[11,"fmt
","","",19,[[["formatter",3]],["result",6]]],[11,"fmt","","",21,[[["formatter",3]],["result",6]]],[11,"fmt","","",22,[[["formatter",3]],["result",6]]],[11,"fmt","","",24,[[["formatter",3]],["result",6]]],[11,"fmt","","",25,[[["formatter",3]],["result",6]]],[11,"fmt","","",27,[[["formatter",3]],["result",6]]],[11,"fmt","","",28,[[["formatter",3]],["result",6]]],[11,"fmt","","",30,[[["formatter",3]],["result",6]]],[11,"fmt","","",31,[[["formatter",3]],["result",6]]],[11,"fmt","","",34,[[["formatter",3]],["result",6]]],[11,"fmt","","",35,[[["formatter",3]],["result",6]]],[11,"fmt","","",37,[[["formatter",3]],["result",6]]],[11,"fmt","","",38,[[["formatter",3]],["result",6]]],[11,"fmt","","",40,[[["formatter",3]],["result",6]]],[11,"fmt","","",41,[[["formatter",3]],["result",6]]],[11,"fmt","","",44,[[["formatter",3]],["result",6]]],[11,"fmt","","",45,[[["formatter",3]],["result",6]]],[11,"fmt","","",51,[[["formatter",3]],["result",6]]],[11,"fmt","","",52,[[["formatter",3]],["result",6]]],[11,"fmt","","",54,[[["formatter",3]],["result",6]]],[11,"fmt","","",55,[[["formatter",3]],["result",6]]],[11,"fmt","","",57,[[["formatter",3]],["result",6]]],[11,"fmt","","",58,[[["formatter",3]],["result",6]]],[11,"fmt","","",60,[[["formatter",3]],["result",6]]],[11,"fmt","","",61,[[["formatter",3]],["result",6]]],[11,"fmt","","",64,[[["formatter",3]],["result",6]]],[11,"fmt","","",65,[[["formatter",3]],["result",6]]],[11,"fmt","","",68,[[["formatter",3]],["result",6]]],[11,"fmt","","",69,[[["formatter",3]],["result",6]]],[11,"fmt","","",74,[[["formatter",3]],["result",6]]],[11,"fmt","","",75,[[["formatter",3]],["result",6]]],[11,"fmt","","",77,[[["formatter",3]],["result",6]]],[11,"fmt","","",78,[[["formatter",3]],["result",6]]],[11,"fmt","","",80,[[["formatter",3]],["result",6]]],[11,"fmt","","",81,[[["formatter",3]],["result",6]]],[11,"fmt","","",84,[[["formatter",3]],["result",6]]],[11,"fmt","","",85,[[["formatter",3]],["result",6]]],[11,"fmt","","",88,[[["formatter",3]],["result",6]]],[11,"fmt","","",89,[[["formatter",3]],["result",6]]],[11,"fmt","","",48,[[["formatter",3]],["result",6]]],[11,"fmt","","",49,[[["formatter",3]],["result",6]]],[11,"fmt","","",71,[[["formatter",3]],["result",6]]],[11,"fmt","","",72,[[["formatter",3]],["result",6]]],[11,"fmt","","",91,[[["formatter",3]],["result",6]]],[11,"fmt","","",92,[[["formatter",3]],["result",6]]],[11,"fmt","","",15,[[["formatter",3]],["result",6]]],[11,"fmt","","",16,[[["formatter",3]],["result",6]]],[11,"fmt","","",18,[[["formatter",3]],["result",6]]],[11,"fmt","","",19,[[["formatter",3]],["result",6]]],[11,"fmt","","",21,[[["formatter",3]],["result",6]]],[11,"fmt","","",22,[[["formatter",3]],["result",6]]],[11,"fmt","","",24,[[["formatter",3]],["result",6]]],[11,"fmt","","",25,[[["formatter",3]],["result",6]]],[11,"fmt","","",27,[[["formatter",3]],["result",6]]],[11,"fmt","","",28,[[["formatter",3]],["result",6]]],[11,"fmt","","",30,[[["formatter",3]],["result",6]]],[11,"fmt","","",31,[[["formatter",3]],["result",6]]],[11,"fmt","","",34,[[["formatter",3]],["result",6]]],[11,"fmt","","",35,[[["formatter",3]],["result",6]]],[11,"fmt","","",37,[[["formatter",3]],["result",6]]],[11,"fmt","","",38,[[["formatter",3]],["result",6]]],[11,"fmt","","",40,[[["formatter",3]],["result",6]]],[11,"fmt","","",41,[[["formatter",3]],["result",6]]],[11,"fmt","","",44,[[["formatter",3]],["result",6]]],[11,"fmt","","",45,[[["formatter",3]],["result",6]]],[11,"fmt","","",51,[[["formatter",3]],["result",6]]],[11,"fmt","","",52,[[["formatter",3]],["result",6]]],[11,"fmt",""
,"",54,[[["formatter",3]],["result",6]]],[11,"fmt","","",55,[[["formatter",3]],["result",6]]],[11,"fmt","","",57,[[["formatter",3]],["result",6]]],[11,"fmt","","",58,[[["formatter",3]],["result",6]]],[11,"fmt","","",60,[[["formatter",3]],["result",6]]],[11,"fmt","","",61,[[["formatter",3]],["result",6]]],[11,"fmt","","",64,[[["formatter",3]],["result",6]]],[11,"fmt","","",65,[[["formatter",3]],["result",6]]],[11,"fmt","","",68,[[["formatter",3]],["result",6]]],[11,"fmt","","",69,[[["formatter",3]],["result",6]]],[11,"fmt","","",74,[[["formatter",3]],["result",6]]],[11,"fmt","","",75,[[["formatter",3]],["result",6]]],[11,"fmt","","",77,[[["formatter",3]],["result",6]]],[11,"fmt","","",78,[[["formatter",3]],["result",6]]],[11,"fmt","","",80,[[["formatter",3]],["result",6]]],[11,"fmt","","",81,[[["formatter",3]],["result",6]]],[11,"fmt","","",84,[[["formatter",3]],["result",6]]],[11,"fmt","","",85,[[["formatter",3]],["result",6]]],[11,"fmt","","",88,[[["formatter",3]],["result",6]]],[11,"fmt","","",89,[[["formatter",3]],["result",6]]],[11,"fmt","","",48,[[["formatter",3]],["result",6]]],[11,"fmt","","",49,[[["formatter",3]],["result",6]]],[11,"fmt","","",71,[[["formatter",3]],["result",6]]],[11,"fmt","","",72,[[["formatter",3]],["result",6]]],[11,"fmt","","",91,[[["formatter",3]],["result",6]]],[11,"fmt","","",92,[[["formatter",3]],["result",6]]],[11,"fmt","","",15,[[["formatter",3]],["result",6]]],[11,"fmt","","",16,[[["formatter",3]],["result",6]]],[11,"fmt","","",18,[[["formatter",3]],["result",6]]],[11,"fmt","","",19,[[["formatter",3]],["result",6]]],[11,"fmt","","",21,[[["formatter",3]],["result",6]]],[11,"fmt","","",22,[[["formatter",3]],["result",6]]],[11,"fmt","","",24,[[["formatter",3]],["result",6]]],[11,"fmt","","",25,[[["formatter",3]],["result",6]]],[11,"fmt","","",27,[[["formatter",3]],["result",6]]],[11,"fmt","","",28,[[["formatter",3]],["result",6]]],[11,"fmt","","",30,[[["formatter",3]],["result",6]]],[11,"fmt","","",31,[[["formatter",3]],["result",6]]],[11,"fmt","","",34,[[["formatter",3]],["result",6]]],[11,"fmt","","",35,[[["formatter",3]],["result",6]]],[11,"fmt","","",37,[[["formatter",3]],["result",6]]],[11,"fmt","","",38,[[["formatter",3]],["result",6]]],[11,"fmt","","",40,[[["formatter",3]],["result",6]]],[11,"fmt","","",41,[[["formatter",3]],["result",6]]],[11,"fmt","","",44,[[["formatter",3]],["result",6]]],[11,"fmt","","",45,[[["formatter",3]],["result",6]]],[11,"fmt","","",51,[[["formatter",3]],["result",6]]],[11,"fmt","","",52,[[["formatter",3]],["result",6]]],[11,"fmt","","",54,[[["formatter",3]],["result",6]]],[11,"fmt","","",55,[[["formatter",3]],["result",6]]],[11,"fmt","","",57,[[["formatter",3]],["result",6]]],[11,"fmt","","",58,[[["formatter",3]],["result",6]]],[11,"fmt","","",60,[[["formatter",3]],["result",6]]],[11,"fmt","","",61,[[["formatter",3]],["result",6]]],[11,"fmt","","",64,[[["formatter",3]],["result",6]]],[11,"fmt","","",65,[[["formatter",3]],["result",6]]],[11,"fmt","","",68,[[["formatter",3]],["result",6]]],[11,"fmt","","",69,[[["formatter",3]],["result",6]]],[11,"fmt","","",74,[[["formatter",3]],["result",6]]],[11,"fmt","","",75,[[["formatter",3]],["result",6]]],[11,"fmt","","",77,[[["formatter",3]],["result",6]]],[11,"fmt","","",78,[[["formatter",3]],["result",6]]],[11,"fmt","","",80,[[["formatter",3]],["result",6]]],[11,"fmt","","",81,[[["formatter",3]],["result",6]]],[11,"fmt","","",84,[[["formatter",3]],["result",6]]],[11,"fmt","","",85,[[["formatter",3]],["result",6]]],[11,"fmt","","",88,[[["formatter",3]],["result",6]]],[11,"fmt","","",
89,[[["formatter",3]],["result",6]]],[11,"fmt","","",48,[[["formatter",3]],["result",6]]],[11,"fmt","","",49,[[["formatter",3]],["result",6]]],[11,"fmt","","",71,[[["formatter",3]],["result",6]]],[11,"fmt","","",72,[[["formatter",3]],["result",6]]],[11,"fmt","","",91,[[["formatter",3]],["result",6]]],[11,"fmt","","",92,[[["formatter",3]],["result",6]]],[11,"clone","","",6,[[],["simd",3]]],[11,"clone","","",14,[[],["lexicographicallyordered",3]]],[11,"clone","","",0,[[],["m8",3]]],[11,"clone","","",1,[[],["m16",3]]],[11,"clone","","",2,[[],["m32",3]]],[11,"clone","","",3,[[],["m64",3]]],[11,"clone","","",4,[[],["m128",3]]],[11,"clone","","",5,[[],["msize",3]]],[11,"default","","",0,[[]]],[11,"default","","",1,[[]]],[11,"default","","",2,[[]]],[11,"default","","",3,[[]]],[11,"default","","",4,[[]]],[11,"default","","",5,[[]]],[11,"default","","",15,[[]]],[11,"default","","",16,[[]]],[11,"default","","",17,[[]]],[11,"default","","",18,[[]]],[11,"default","","",19,[[]]],[11,"default","","",20,[[]]],[11,"default","","",21,[[]]],[11,"default","","",22,[[]]],[11,"default","","",23,[[]]],[11,"default","","",24,[[]]],[11,"default","","",25,[[]]],[11,"default","","",26,[[]]],[11,"default","","",27,[[]]],[11,"default","","",28,[[]]],[11,"default","","",29,[[]]],[11,"default","","",30,[[]]],[11,"default","","",31,[[]]],[11,"default","","",33,[[]]],[11,"default","","",32,[[]]],[11,"default","","",34,[[]]],[11,"default","","",35,[[]]],[11,"default","","",36,[[]]],[11,"default","","",37,[[]]],[11,"default","","",38,[[]]],[11,"default","","",39,[[]]],[11,"default","","",40,[[]]],[11,"default","","",41,[[]]],[11,"default","","",42,[[]]],[11,"default","","",43,[[]]],[11,"default","","",44,[[]]],[11,"default","","",45,[[]]],[11,"default","","",46,[[]]],[11,"default","","",47,[[]]],[11,"default","","",51,[[]]],[11,"default","","",52,[[]]],[11,"default","","",53,[[]]],[11,"default","","",54,[[]]],[11,"default","","",55,[[]]],[11,"default","","",56,[[]]],[11,"default","","",57,[[]]],[11,"default","","",58,[[]]],[11,"default","","",59,[[]]],[11,"default","","",60,[[]]],[11,"default","","",61,[[]]],[11,"default","","",62,[[]]],[11,"default","","",63,[[]]],[11,"default","","",64,[[]]],[11,"default","","",65,[[]]],[11,"default","","",66,[[]]],[11,"default","","",67,[[]]],[11,"default","","",68,[[]]],[11,"default","","",69,[[]]],[11,"default","","",70,[[]]],[11,"default","","",74,[[]]],[11,"default","","",75,[[]]],[11,"default","","",76,[[]]],[11,"default","","",77,[[]]],[11,"default","","",78,[[]]],[11,"default","","",79,[[]]],[11,"default","","",80,[[]]],[11,"default","","",81,[[]]],[11,"default","","",82,[[]]],[11,"default","","",83,[[]]],[11,"default","","",84,[[]]],[11,"default","","",85,[[]]],[11,"default","","",86,[[]]],[11,"default","","",87,[[]]],[11,"default","","",88,[[]]],[11,"default","","",89,[[]]],[11,"default","","",90,[[]]],[11,"default","","",48,[[]]],[11,"default","","",49,[[]]],[11,"default","","",50,[[]]],[11,"default","","",71,[[]]],[11,"default","","",72,[[]]],[11,"default","","",73,[[]]],[11,"default","","",91,[[]]],[11,"default","","",92,[[]]],[11,"default","","",93,[[]]],[11,"default","","",94,[[]]],[11,"default","","",95,[[]]],[11,"default","","",96,[[]]],[11,"default","","",97,[[]]],[11,"default","","",98,[[]]],[11,"default","","",99,[[]]],[18,"N","","The number of elements in the array.",8,null],[18,"LANES","","The number of elements in the SIMD vector.",7,null],[11,"new","","Creates a new instance with each vector elements…",15,[[]]],[11,"lanes","","Returns the number of vector 
lanes.",15,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",15,[[]]],[11,"extract","","Extracts the value at `index`.",15,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",15,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",15,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",15,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",15,[[["i8x2",6]],["i8x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",15,[[["i8x2",6]],["i8x2",6]]],[11,"min","","Minimum of two vectors.",15,[[]]],[11,"max","","Maximum of two vectors.",15,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",15,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",15,[[]]],[11,"max_element","","Largest vector element value.",15,[[]]],[11,"min_element","","Smallest vector element value.",15,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",15,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",15,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",15,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",15,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",15,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",15,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",15,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",15,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",15,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",15,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",15,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",15,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",15,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",15,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",15,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",15,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",15,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",15,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",15,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",15,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",15,[[]]],[11,"eq","","Lane-wise equality comparison.",15,[[],["m8x2",6]]],[11,"ne","","Lane-wise inequality comparison.",15,[[],["m8x2",6]]],[11,"lt","","Lane-wise less-than comparison.",15,[[],["m8x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",15,[[],["m8x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",15,[[],["m8x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",15,[[],["m8x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",15,[[],[["lexicographicallyordered",3],["i8x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements 
`Ord`.",15,[[],[["lexicographicallyordered",3],["i8x2",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",15,[[]]],[11,"new","","Creates a new instance with each vector elements…",16,[[]]],[11,"lanes","","Returns the number of vector lanes.",16,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",16,[[]]],[11,"extract","","Extracts the value at `index`.",16,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",16,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",16,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",16,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",16,[[["u8x2",6]],["u8x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",16,[[["u8x2",6]],["u8x2",6]]],[11,"min","","Minimum of two vectors.",16,[[]]],[11,"max","","Maximum of two vectors.",16,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",16,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",16,[[]]],[11,"max_element","","Largest vector element value.",16,[[]]],[11,"min_element","","Smallest vector element value.",16,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",16,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",16,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",16,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",16,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",16,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",16,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",16,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",16,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",16,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",16,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",16,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",16,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",16,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",16,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",16,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",16,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",16,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",16,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",16,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",16,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",16,[[]]],[11,"eq","","Lane-wise equality comparison.",16,[[],["m8x2",6]]],[11,"ne","","Lane-wise inequality comparison.",16,[[],["m8x2",6]]],[11,"lt","","Lane-wise less-than comparison.",16,[[],["m8x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",16,[[],["m8x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",16,[[],["m8x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals 
comparison.",16,[[],["m8x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",16,[[],[["lexicographicallyordered",3],["u8x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",16,[[],[["lexicographicallyordered",3],["u8x2",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",16,[[]]],[11,"new","","Creates a new instance with each vector elements…",17,[[]]],[11,"lanes","","Returns the number of vector lanes.",17,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",17,[[]]],[11,"extract","","Extracts the value at `index`.",17,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",17,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",17,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",17,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",17,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",17,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",17,[[]]],[11,"all","","Are `all` vector lanes `true`?",17,[[]]],[11,"any","","Is `any` vector lane `true`?",17,[[]]],[11,"none","","Are `all` vector lanes `false`?",17,[[]]],[11,"eq","","Lane-wise equality comparison.",17,[[],["m8x2",6]]],[11,"ne","","Lane-wise inequality comparison.",17,[[],["m8x2",6]]],[11,"lt","","Lane-wise less-than comparison.",17,[[],["m8x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",17,[[],["m8x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",17,[[],["m8x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",17,[[],["m8x2",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",17,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",17,[[],[["m8x2",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",17,[[],[["m8x2",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",17,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",17,[[]]],[11,"new","","Creates a new instance with each vector elements…",18,[[]]],[11,"lanes","","Returns the number of vector lanes.",18,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",18,[[]]],[11,"extract","","Extracts the value at `index`.",18,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",18,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",18,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",18,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",18,[[["i8x4",6]],["i8x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",18,[[["i8x4",6]],["i8x4",6]]],[11,"min","","Minimum of two vectors.",18,[[]]],[11,"max","","Maximum of two vectors.",18,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",18,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",18,[[]]],[11,"max_element","","Largest vector element value.",18,[[]]],[11,"min_element","","Smallest vector element value.",18,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",18,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",18,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector 
elements.",18,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",18,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",18,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",18,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",18,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",18,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",18,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",18,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",18,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",18,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",18,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",18,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",18,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",18,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",18,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",18,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",18,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",18,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",18,[[]]],[11,"eq","","Lane-wise equality comparison.",18,[[],["m8x4",6]]],[11,"ne","","Lane-wise inequality comparison.",18,[[],["m8x4",6]]],[11,"lt","","Lane-wise less-than comparison.",18,[[],["m8x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",18,[[],["m8x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",18,[[],["m8x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",18,[[],["m8x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",18,[[],[["lexicographicallyordered",3],["i8x4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",18,[[],[["lexicographicallyordered",3],["i8x4",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",18,[[]]],[11,"new","","Creates a new instance with each vector elements…",19,[[]]],[11,"lanes","","Returns the number of vector lanes.",19,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",19,[[]]],[11,"extract","","Extracts the value at `index`.",19,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",19,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",19,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",19,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",19,[[["u8x4",6]],["u8x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",19,[[["u8x4",6]],["u8x4",6]]],[11,"min","","Minimum of two vectors.",19,[[]]],[11,"max","","Maximum of two vectors.",19,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",19,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",19,[[]]],[11,"max_element","","Largest vector element value.",19,[[]]],[11,"min_element","","Smallest vector element 
value.",19,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",19,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",19,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",19,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",19,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",19,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",19,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",19,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",19,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",19,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",19,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",19,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",19,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",19,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",19,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",19,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",19,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",19,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",19,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",19,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",19,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",19,[[]]],[11,"eq","","Lane-wise equality comparison.",19,[[],["m8x4",6]]],[11,"ne","","Lane-wise inequality comparison.",19,[[],["m8x4",6]]],[11,"lt","","Lane-wise less-than comparison.",19,[[],["m8x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",19,[[],["m8x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",19,[[],["m8x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",19,[[],["m8x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",19,[[],[["u8x4",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",19,[[],[["u8x4",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",19,[[]]],[11,"new","","Creates a new instance with each vector elements…",20,[[]]],[11,"lanes","","Returns the number of vector lanes.",20,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",20,[[]]],[11,"extract","","Extracts the value at `index`.",20,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",20,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",20,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",20,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",20,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",20,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",20,[[]]],[11,"all","","Are `all` vector lanes `true`?",20,[[]]],[11,"any","","Is `any` vector lane `true`?",20,[[]]],[11,"none","","Are `all` vector lanes `false`?",20,[[]]],[11,"eq","","Lane-wise equality 
comparison.",20,[[],["m8x4",6]]],[11,"ne","","Lane-wise inequality comparison.",20,[[],["m8x4",6]]],[11,"lt","","Lane-wise less-than comparison.",20,[[],["m8x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",20,[[],["m8x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",20,[[],["m8x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",20,[[],["m8x4",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",20,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",20,[[],[["lexicographicallyordered",3],["m8x4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",20,[[],[["lexicographicallyordered",3],["m8x4",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",20,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",20,[[]]],[11,"new","","Creates a new instance with each vector elements…",21,[[]]],[11,"lanes","","Returns the number of vector lanes.",21,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",21,[[]]],[11,"extract","","Extracts the value at `index`.",21,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",21,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",21,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",21,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",21,[[["i16x2",6]],["i16x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",21,[[["i16x2",6]],["i16x2",6]]],[11,"min","","Minimum of two vectors.",21,[[]]],[11,"max","","Maximum of two vectors.",21,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",21,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",21,[[]]],[11,"max_element","","Largest vector element value.",21,[[]]],[11,"min_element","","Smallest vector element value.",21,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",21,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",21,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",21,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",21,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",21,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",21,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",21,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",21,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",21,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",21,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",21,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",21,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",21,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",21,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",21,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",21,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation 
of…",21,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",21,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",21,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",21,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",21,[[]]],[11,"eq","","Lane-wise equality comparison.",21,[[],["m16x2",6]]],[11,"ne","","Lane-wise inequality comparison.",21,[[],["m16x2",6]]],[11,"lt","","Lane-wise less-than comparison.",21,[[],["m16x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",21,[[],["m16x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",21,[[],["m16x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",21,[[],["m16x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",21,[[],[["lexicographicallyordered",3],["i16x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",21,[[],[["lexicographicallyordered",3],["i16x2",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",21,[[]]],[11,"new","","Creates a new instance with each vector elements…",22,[[]]],[11,"lanes","","Returns the number of vector lanes.",22,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",22,[[]]],[11,"extract","","Extracts the value at `index`.",22,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",22,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",22,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",22,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",22,[[["u16x2",6]],["u16x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",22,[[["u16x2",6]],["u16x2",6]]],[11,"min","","Minimum of two vectors.",22,[[]]],[11,"max","","Maximum of two vectors.",22,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",22,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",22,[[]]],[11,"max_element","","Largest vector element value.",22,[[]]],[11,"min_element","","Smallest vector element value.",22,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",22,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",22,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",22,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",22,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",22,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",22,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",22,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",22,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",22,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",22,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",22,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",22,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",22,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",22,[[]]],[11,"from_le","","Converts a vector from little 
endian to the target\'s…",22,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",22,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",22,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",22,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",22,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",22,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",22,[[]]],[11,"eq","","Lane-wise equality comparison.",22,[[],["m16x2",6]]],[11,"ne","","Lane-wise inequality comparison.",22,[[],["m16x2",6]]],[11,"lt","","Lane-wise less-than comparison.",22,[[],["m16x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",22,[[],["m16x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",22,[[],["m16x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",22,[[],["m16x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",22,[[],[["u16x2",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",22,[[],[["u16x2",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",22,[[]]],[11,"new","","Creates a new instance with each vector elements…",23,[[]]],[11,"lanes","","Returns the number of vector lanes.",23,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",23,[[]]],[11,"extract","","Extracts the value at `index`.",23,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",23,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",23,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",23,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",23,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",23,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",23,[[]]],[11,"all","","Are `all` vector lanes `true`?",23,[[]]],[11,"any","","Is `any` vector lane `true`?",23,[[]]],[11,"none","","Are `all` vector lanes `false`?",23,[[]]],[11,"eq","","Lane-wise equality comparison.",23,[[],["m16x2",6]]],[11,"ne","","Lane-wise inequality comparison.",23,[[],["m16x2",6]]],[11,"lt","","Lane-wise less-than comparison.",23,[[],["m16x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",23,[[],["m16x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",23,[[],["m16x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",23,[[],["m16x2",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",23,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",23,[[],[["lexicographicallyordered",3],["m16x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",23,[[],[["lexicographicallyordered",3],["m16x2",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",23,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",23,[[]]],[11,"new","","Creates a new instance with each vector elements…",24,[[]]],[11,"lanes","","Returns the number of vector lanes.",24,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",24,[[]]],[11,"extract","","Extracts the value at `index`.",24,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",24,[[]]],[11,"replace","","Returns a new vector where the value at `index` 
is…",24,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",24,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",24,[[["i8x8",6]],["i8x8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",24,[[["i8x8",6]],["i8x8",6]]],[11,"min","","Minimum of two vectors.",24,[[]]],[11,"max","","Maximum of two vectors.",24,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",24,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",24,[[]]],[11,"max_element","","Largest vector element value.",24,[[]]],[11,"min_element","","Smallest vector element value.",24,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",24,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",24,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",24,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",24,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",24,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",24,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",24,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",24,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",24,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",24,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",24,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",24,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",24,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",24,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",24,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",24,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",24,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",24,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",24,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",24,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",24,[[]]],[11,"eq","","Lane-wise equality comparison.",24,[[],["m8x8",6]]],[11,"ne","","Lane-wise inequality comparison.",24,[[],["m8x8",6]]],[11,"lt","","Lane-wise less-than comparison.",24,[[],["m8x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",24,[[],["m8x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",24,[[],["m8x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",24,[[],["m8x8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",24,[[],[["lexicographicallyordered",3],["i8x8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",24,[[],[["lexicographicallyordered",3],["i8x8",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",24,[[]]],[11,"new","","Creates a new instance with each vector elements…",25,[[]]],[11,"lanes","","Returns the number of vector lanes.",25,[[]]],[11,"splat","","Constructs a new instance with each element initialized 
to…",25,[[]]],[11,"extract","","Extracts the value at `index`.",25,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",25,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",25,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",25,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",25,[[["u8x8",6]],["u8x8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",25,[[["u8x8",6]],["u8x8",6]]],[11,"min","","Minimum of two vectors.",25,[[]]],[11,"max","","Maximum of two vectors.",25,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",25,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",25,[[]]],[11,"max_element","","Largest vector element value.",25,[[]]],[11,"min_element","","Smallest vector element value.",25,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",25,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",25,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",25,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",25,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",25,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",25,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",25,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",25,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",25,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",25,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",25,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",25,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",25,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",25,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",25,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",25,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",25,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",25,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",25,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",25,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",25,[[]]],[11,"eq","","Lane-wise equality comparison.",25,[[],["m8x8",6]]],[11,"ne","","Lane-wise inequality comparison.",25,[[],["m8x8",6]]],[11,"lt","","Lane-wise less-than comparison.",25,[[],["m8x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",25,[[],["m8x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",25,[[],["m8x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",25,[[],["m8x8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",25,[[],[["lexicographicallyordered",3],["u8x8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",25,[[],[["lexicographicallyordered",3],["u8x8",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",25,[[]]],[11,"new","","Creates a new 
instance with each vector elements…",26,[[]]],[11,"lanes","","Returns the number of vector lanes.",26,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",26,[[]]],[11,"extract","","Extracts the value at `index`.",26,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",26,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",26,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",26,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",26,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",26,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",26,[[]]],[11,"all","","Are `all` vector lanes `true`?",26,[[]]],[11,"any","","Is `any` vector lane `true`?",26,[[]]],[11,"none","","Are `all` vector lanes `false`?",26,[[]]],[11,"eq","","Lane-wise equality comparison.",26,[[],["m8x8",6]]],[11,"ne","","Lane-wise inequality comparison.",26,[[],["m8x8",6]]],[11,"lt","","Lane-wise less-than comparison.",26,[[],["m8x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",26,[[],["m8x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",26,[[],["m8x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",26,[[],["m8x8",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",26,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",26,[[],[["lexicographicallyordered",3],["m8x8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",26,[[],[["lexicographicallyordered",3],["m8x8",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",26,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",26,[[]]],[11,"new","","Creates a new instance with each vector elements…",27,[[]]],[11,"lanes","","Returns the number of vector lanes.",27,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",27,[[]]],[11,"extract","","Extracts the value at `index`.",27,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",27,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",27,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",27,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",27,[[["i16x4",6]],["i16x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",27,[[["i16x4",6]],["i16x4",6]]],[11,"min","","Minimum of two vectors.",27,[[]]],[11,"max","","Maximum of two vectors.",27,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",27,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",27,[[]]],[11,"max_element","","Largest vector element value.",27,[[]]],[11,"min_element","","Smallest vector element value.",27,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",27,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",27,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",27,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",27,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",27,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",27,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the 
`slice`.",27,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",27,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",27,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",27,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",27,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",27,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",27,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",27,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",27,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",27,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",27,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",27,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",27,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",27,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",27,[[]]],[11,"eq","","Lane-wise equality comparison.",27,[[],["m16x4",6]]],[11,"ne","","Lane-wise inequality comparison.",27,[[],["m16x4",6]]],[11,"lt","","Lane-wise less-than comparison.",27,[[],["m16x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",27,[[],["m16x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",27,[[],["m16x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",27,[[],["m16x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",27,[[],[["i16x4",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",27,[[],[["i16x4",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",27,[[]]],[11,"new","","Creates a new instance with each vector elements…",28,[[]]],[11,"lanes","","Returns the number of vector lanes.",28,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",28,[[]]],[11,"extract","","Extracts the value at `index`.",28,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",28,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",28,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",28,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",28,[[["u16x4",6]],["u16x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",28,[[["u16x4",6]],["u16x4",6]]],[11,"min","","Minimum of two vectors.",28,[[]]],[11,"max","","Maximum of two vectors.",28,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",28,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",28,[[]]],[11,"max_element","","Largest vector element value.",28,[[]]],[11,"min_element","","Smallest vector element value.",28,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",28,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",28,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",28,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",28,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the 
`slice`.",28,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",28,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",28,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",28,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",28,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",28,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",28,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",28,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",28,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",28,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",28,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",28,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",28,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",28,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",28,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",28,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",28,[[]]],[11,"eq","","Lane-wise equality comparison.",28,[[],["m16x4",6]]],[11,"ne","","Lane-wise inequality comparison.",28,[[],["m16x4",6]]],[11,"lt","","Lane-wise less-than comparison.",28,[[],["m16x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",28,[[],["m16x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",28,[[],["m16x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",28,[[],["m16x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",28,[[],[["lexicographicallyordered",3],["u16x4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",28,[[],[["lexicographicallyordered",3],["u16x4",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",28,[[]]],[11,"new","","Creates a new instance with each vector elements…",29,[[]]],[11,"lanes","","Returns the number of vector lanes.",29,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",29,[[]]],[11,"extract","","Extracts the value at `index`.",29,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",29,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",29,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",29,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",29,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",29,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",29,[[]]],[11,"all","","Are `all` vector lanes `true`?",29,[[]]],[11,"any","","Is `any` vector lane `true`?",29,[[]]],[11,"none","","Are `all` vector lanes `false`?",29,[[]]],[11,"eq","","Lane-wise equality comparison.",29,[[],["m16x4",6]]],[11,"ne","","Lane-wise inequality comparison.",29,[[],["m16x4",6]]],[11,"lt","","Lane-wise less-than comparison.",29,[[],["m16x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",29,[[],["m16x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",29,[[],["m16x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals 
comparison.",29,[[],["m16x4",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",29,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",29,[[],[["lexicographicallyordered",3],["m16x4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",29,[[],[["lexicographicallyordered",3],["m16x4",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",29,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",29,[[]]],[11,"new","","Creates a new instance with each vector elements…",30,[[]]],[11,"lanes","","Returns the number of vector lanes.",30,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",30,[[]]],[11,"extract","","Extracts the value at `index`.",30,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",30,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",30,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",30,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",30,[[["i32x2",6]],["i32x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",30,[[["i32x2",6]],["i32x2",6]]],[11,"min","","Minimum of two vectors.",30,[[]]],[11,"max","","Maximum of two vectors.",30,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",30,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",30,[[]]],[11,"max_element","","Largest vector element value.",30,[[]]],[11,"min_element","","Smallest vector element value.",30,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",30,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",30,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",30,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",30,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",30,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",30,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",30,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",30,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",30,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",30,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",30,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",30,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",30,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",30,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",30,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",30,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",30,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",30,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",30,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",30,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",30,[[]]],[11,"eq","","Lane-wise 
equality comparison.",30,[[],["m32x2",6]]],[11,"ne","","Lane-wise inequality comparison.",30,[[],["m32x2",6]]],[11,"lt","","Lane-wise less-than comparison.",30,[[],["m32x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",30,[[],["m32x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",30,[[],["m32x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",30,[[],["m32x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",30,[[],[["i32x2",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",30,[[],[["i32x2",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",30,[[]]],[11,"new","","Creates a new instance with each vector elements…",31,[[]]],[11,"lanes","","Returns the number of vector lanes.",31,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",31,[[]]],[11,"extract","","Extracts the value at `index`.",31,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",31,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",31,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",31,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",31,[[["u32x2",6]],["u32x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",31,[[["u32x2",6]],["u32x2",6]]],[11,"min","","Minimum of two vectors.",31,[[]]],[11,"max","","Maximum of two vectors.",31,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",31,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",31,[[]]],[11,"max_element","","Largest vector element value.",31,[[]]],[11,"min_element","","Smallest vector element value.",31,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",31,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",31,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",31,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",31,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",31,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",31,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",31,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",31,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",31,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",31,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",31,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",31,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",31,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",31,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",31,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",31,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",31,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",31,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the 
binary…",31,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",31,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",31,[[]]],[11,"eq","","Lane-wise equality comparison.",31,[[],["m32x2",6]]],[11,"ne","","Lane-wise inequality comparison.",31,[[],["m32x2",6]]],[11,"lt","","Lane-wise less-than comparison.",31,[[],["m32x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",31,[[],["m32x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",31,[[],["m32x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",31,[[],["m32x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",31,[[],[["lexicographicallyordered",3],["u32x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",31,[[],[["lexicographicallyordered",3],["u32x2",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",31,[[]]],[11,"new","","Creates a new instance with each vector elements…",33,[[]]],[11,"lanes","","Returns the number of vector lanes.",33,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",33,[[]]],[11,"extract","","Extracts the value at `index`.",33,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",33,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",33,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",33,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",33,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",33,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",33,[[]]],[11,"all","","Are `all` vector lanes `true`?",33,[[]]],[11,"any","","Is `any` vector lane `true`?",33,[[]]],[11,"none","","Are `all` vector lanes `false`?",33,[[]]],[11,"eq","","Lane-wise equality comparison.",33,[[],["m32x2",6]]],[11,"ne","","Lane-wise inequality comparison.",33,[[],["m32x2",6]]],[11,"lt","","Lane-wise less-than comparison.",33,[[],["m32x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",33,[[],["m32x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",33,[[],["m32x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",33,[[],["m32x2",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",33,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",33,[[],[["lexicographicallyordered",3],["m32x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",33,[[],[["lexicographicallyordered",3],["m32x2",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",33,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",33,[[]]],[11,"new","","Creates a new instance with each vector elements…",32,[[]]],[11,"lanes","","Returns the number of vector lanes.",32,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",32,[[]]],[11,"extract","","Extracts the value at `index`.",32,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",32,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",32,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",32,[[]]],[11,"min","","Minimum of two vectors.",32,[[]]],[11,"max","","Maximum of two vectors.",32,[[]]],[11,"sum","","Horizontal sum of the vector elements.",32,[[]]],[11,"product","","Horizontal product of the vector elements.",32,[[]]],[11,"max_element","","Largest vector element 
value.",32,[[]]],[11,"min_element","","Smallest vector element value.",32,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",32,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",32,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",32,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",32,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",32,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",32,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",32,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",32,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",32,[[]]],[18,"EPSILON","","Machine epsilon value.",32,null],[18,"MIN","","Smallest finite value.",32,null],[18,"MIN_POSITIVE","","Smallest positive normal value.",32,null],[18,"MAX","","Largest finite value.",32,null],[18,"NAN","","Not a Number (NaN).",32,null],[18,"INFINITY","","Infinity (∞).",32,null],[18,"NEG_INFINITY","","Negative infinity (-∞).",32,null],[18,"PI","","Archimedes\' constant (π)",32,null],[18,"FRAC_PI_2","","π/2",32,null],[18,"FRAC_PI_3","","π/3",32,null],[18,"FRAC_PI_4","","π/4",32,null],[18,"FRAC_PI_6","","π/6",32,null],[18,"FRAC_PI_8","","π/8",32,null],[18,"FRAC_1_PI","","1/π",32,null],[18,"FRAC_2_PI","","2/π",32,null],[18,"FRAC_2_SQRT_PI","","2/sqrt(π)",32,null],[18,"SQRT_2","","sqrt(2)",32,null],[18,"FRAC_1_SQRT_2","","1/sqrt(2)",32,null],[18,"E","","Euler\'s number (e)",32,null],[18,"LOG2_E","","log2(e)",32,null],[18,"LOG10_E","","log10(e)",32,null],[18,"LN_2","","ln(2)",32,null],[18,"LN_10","","ln(10)",32,null],[11,"is_nan","","",32,[[],["m32x2",6]]],[11,"is_infinite","","",32,[[],["m32x2",6]]],[11,"is_finite","","",32,[[],["m32x2",6]]],[11,"abs","","Absolute value.",32,[[]]],[11,"cos","","Cosine.",32,[[]]],[11,"cos_pi","","Cosine of `self * PI`.",32,[[]]],[11,"exp","","Returns the exponential function of `self`: `e^(self)`.",32,[[]]],[11,"ln","","Returns the natural logarithm of `self`.",32,[[]]],[11,"mul_add","","Fused multiply add: `self * y + z`",32,[[]]],[11,"mul_adde","","Fused multiply add estimate: ~= `self * y + z`",32,[[]]],[11,"powf","","Raises `self` number to the floating point power of `x`.",32,[[]]],[11,"recpre","","Reciprocal estimate: `~= 1. / self`.",32,[[]]],[11,"rsqrte","","Reciprocal square-root estimate: `~= 1. 
/ self.sqrt()`.",32,[[]]],[11,"sin","","Sine.",32,[[]]],[11,"sin_pi","","Sine of `self * PI`.",32,[[]]],[11,"sin_cos_pi","","Sine and cosine of `self * PI`.",32,[[]]],[11,"sqrt","","",32,[[]]],[11,"sqrte","","Square-root estimate.",32,[[]]],[11,"tanh","","Tanh.",32,[[]]],[11,"eq","","Lane-wise equality comparison.",32,[[],["m32x2",6]]],[11,"ne","","Lane-wise inequality comparison.",32,[[],["m32x2",6]]],[11,"lt","","Lane-wise less-than comparison.",32,[[],["m32x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",32,[[],["m32x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",32,[[],["m32x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",32,[[],["m32x2",6]]],[11,"new","","Creates a new instance with each vector elements…",34,[[]]],[11,"lanes","","Returns the number of vector lanes.",34,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",34,[[]]],[11,"extract","","Extracts the value at `index`.",34,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",34,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",34,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",34,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",34,[[["i8x16",6]],["i8x16",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",34,[[["i8x16",6]],["i8x16",6]]],[11,"min","","Minimum of two vectors.",34,[[]]],[11,"max","","Maximum of two vectors.",34,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",34,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",34,[[]]],[11,"max_element","","Largest vector element value.",34,[[]]],[11,"min_element","","Smallest vector element value.",34,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",34,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",34,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",34,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",34,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",34,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",34,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",34,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",34,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",34,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",34,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",34,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",34,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",34,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",34,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",34,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",34,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",34,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",34,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the 
binary…",34,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",34,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",34,[[]]],[11,"eq","","Lane-wise equality comparison.",34,[[],["m8x16",6]]],[11,"ne","","Lane-wise inequality comparison.",34,[[],["m8x16",6]]],[11,"lt","","Lane-wise less-than comparison.",34,[[],["m8x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",34,[[],["m8x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",34,[[],["m8x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",34,[[],["m8x16",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",34,[[],[["i8x16",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",34,[[],[["i8x16",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",34,[[]]],[11,"new","","Creates a new instance with each vector elements…",35,[[]]],[11,"lanes","","Returns the number of vector lanes.",35,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",35,[[]]],[11,"extract","","Extracts the value at `index`.",35,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",35,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",35,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",35,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",35,[[["u8x16",6]],["u8x16",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",35,[[["u8x16",6]],["u8x16",6]]],[11,"min","","Minimum of two vectors.",35,[[]]],[11,"max","","Maximum of two vectors.",35,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",35,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",35,[[]]],[11,"max_element","","Largest vector element value.",35,[[]]],[11,"min_element","","Smallest vector element value.",35,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",35,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",35,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",35,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",35,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",35,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",35,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",35,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",35,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",35,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",35,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",35,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",35,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",35,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",35,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",35,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",35,[[]]],[11,"count_ones","","Returns the number of ones in the 
binary representation of…",35,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",35,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",35,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",35,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",35,[[]]],[11,"eq","","Lane-wise equality comparison.",35,[[],["m8x16",6]]],[11,"ne","","Lane-wise inequality comparison.",35,[[],["m8x16",6]]],[11,"lt","","Lane-wise less-than comparison.",35,[[],["m8x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",35,[[],["m8x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",35,[[],["m8x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",35,[[],["m8x16",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",35,[[],[["lexicographicallyordered",3],["u8x16",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",35,[[],[["lexicographicallyordered",3],["u8x16",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",35,[[]]],[11,"new","","Creates a new instance with each vector elements…",36,[[]]],[11,"lanes","","Returns the number of vector lanes.",36,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",36,[[]]],[11,"extract","","Extracts the value at `index`.",36,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",36,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",36,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",36,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",36,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",36,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",36,[[]]],[11,"all","","Are `all` vector lanes `true`?",36,[[]]],[11,"any","","Is `any` vector lane `true`?",36,[[]]],[11,"none","","Are `all` vector lanes `false`?",36,[[]]],[11,"eq","","Lane-wise equality comparison.",36,[[],["m8x16",6]]],[11,"ne","","Lane-wise inequality comparison.",36,[[],["m8x16",6]]],[11,"lt","","Lane-wise less-than comparison.",36,[[],["m8x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",36,[[],["m8x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",36,[[],["m8x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",36,[[],["m8x16",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",36,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",36,[[],[["lexicographicallyordered",3],["m8x16",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",36,[[],[["lexicographicallyordered",3],["m8x16",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",36,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",36,[[]]],[11,"new","","Creates a new instance with each vector elements…",37,[[]]],[11,"lanes","","Returns the number of vector lanes.",37,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",37,[[]]],[11,"extract","","Extracts the value at `index`.",37,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",37,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",37,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",37,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the 
specified…",37,[[["i16x8",6]],["i16x8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",37,[[["i16x8",6]],["i16x8",6]]],[11,"min","","Minimum of two vectors.",37,[[]]],[11,"max","","Maximum of two vectors.",37,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",37,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",37,[[]]],[11,"max_element","","Largest vector element value.",37,[[]]],[11,"min_element","","Smallest vector element value.",37,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",37,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",37,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",37,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",37,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",37,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",37,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",37,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",37,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",37,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",37,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",37,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",37,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",37,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",37,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",37,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",37,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",37,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",37,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",37,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",37,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",37,[[]]],[11,"eq","","Lane-wise equality comparison.",37,[[],["m16x8",6]]],[11,"ne","","Lane-wise inequality comparison.",37,[[],["m16x8",6]]],[11,"lt","","Lane-wise less-than comparison.",37,[[],["m16x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",37,[[],["m16x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",37,[[],["m16x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",37,[[],["m16x8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",37,[[],[["lexicographicallyordered",3],["i16x8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",37,[[],[["lexicographicallyordered",3],["i16x8",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",37,[[]]],[11,"new","","Creates a new instance with each vector elements…",38,[[]]],[11,"lanes","","Returns the number of vector lanes.",38,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",38,[[]]],[11,"extract","","Extracts the value at `index`.",38,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",38,[[]]],[11,"replace","","Returns a new vector where the 
value at `index` is…",38,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",38,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",38,[[["u16x8",6]],["u16x8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",38,[[["u16x8",6]],["u16x8",6]]],[11,"min","","Minimum of two vectors.",38,[[]]],[11,"max","","Maximum of two vectors.",38,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",38,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",38,[[]]],[11,"max_element","","Largest vector element value.",38,[[]]],[11,"min_element","","Smallest vector element value.",38,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",38,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",38,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",38,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",38,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",38,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",38,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",38,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",38,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",38,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",38,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",38,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",38,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",38,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",38,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",38,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",38,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",38,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",38,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",38,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",38,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",38,[[]]],[11,"eq","","Lane-wise equality comparison.",38,[[],["m16x8",6]]],[11,"ne","","Lane-wise inequality comparison.",38,[[],["m16x8",6]]],[11,"lt","","Lane-wise less-than comparison.",38,[[],["m16x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",38,[[],["m16x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",38,[[],["m16x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",38,[[],["m16x8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",38,[[],[["lexicographicallyordered",3],["u16x8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",38,[[],[["lexicographicallyordered",3],["u16x8",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",38,[[]]],[11,"new","","Creates a new instance with each vector elements…",39,[[]]],[11,"lanes","","Returns the number of vector lanes.",39,[[]]],[11,"splat","","Constructs a new instance with each element initialized 
to…",39,[[]]],[11,"extract","","Extracts the value at `index`.",39,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",39,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",39,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",39,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",39,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",39,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",39,[[]]],[11,"all","","Are `all` vector lanes `true`?",39,[[]]],[11,"any","","Is `any` vector lane `true`?",39,[[]]],[11,"none","","Are `all` vector lanes `false`?",39,[[]]],[11,"eq","","Lane-wise equality comparison.",39,[[],["m16x8",6]]],[11,"ne","","Lane-wise inequality comparison.",39,[[],["m16x8",6]]],[11,"lt","","Lane-wise less-than comparison.",39,[[],["m16x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",39,[[],["m16x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",39,[[],["m16x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",39,[[],["m16x8",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",39,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",39,[[],[["m16x8",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",39,[[],[["m16x8",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",39,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",39,[[]]],[11,"new","","Creates a new instance with each vector elements…",40,[[]]],[11,"lanes","","Returns the number of vector lanes.",40,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",40,[[]]],[11,"extract","","Extracts the value at `index`.",40,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",40,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",40,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",40,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",40,[[["i32x4",6]],["i32x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",40,[[["i32x4",6]],["i32x4",6]]],[11,"min","","Minimum of two vectors.",40,[[]]],[11,"max","","Maximum of two vectors.",40,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",40,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",40,[[]]],[11,"max_element","","Largest vector element value.",40,[[]]],[11,"min_element","","Smallest vector element value.",40,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",40,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",40,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",40,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",40,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",40,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",40,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",40,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",40,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the 
`slice`.",40,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",40,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",40,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",40,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",40,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",40,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",40,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",40,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",40,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",40,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",40,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",40,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",40,[[]]],[11,"eq","","Lane-wise equality comparison.",40,[[],["m32x4",6]]],[11,"ne","","Lane-wise inequality comparison.",40,[[],["m32x4",6]]],[11,"lt","","Lane-wise less-than comparison.",40,[[],["m32x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",40,[[],["m32x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",40,[[],["m32x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",40,[[],["m32x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",40,[[],[["lexicographicallyordered",3],["i32x4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",40,[[],[["lexicographicallyordered",3],["i32x4",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",40,[[]]],[11,"new","","Creates a new instance with each vector elements…",41,[[]]],[11,"lanes","","Returns the number of vector lanes.",41,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",41,[[]]],[11,"extract","","Extracts the value at `index`.",41,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",41,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",41,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",41,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",41,[[["u32x4",6]],["u32x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",41,[[["u32x4",6]],["u32x4",6]]],[11,"min","","Minimum of two vectors.",41,[[]]],[11,"max","","Maximum of two vectors.",41,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",41,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",41,[[]]],[11,"max_element","","Largest vector element value.",41,[[]]],[11,"min_element","","Smallest vector element value.",41,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",41,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",41,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",41,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",41,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",41,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",41,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values 
of the `slice`.",41,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",41,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",41,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",41,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",41,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",41,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",41,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",41,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",41,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",41,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",41,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",41,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",41,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",41,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",41,[[]]],[11,"eq","","Lane-wise equality comparison.",41,[[],["m32x4",6]]],[11,"ne","","Lane-wise inequality comparison.",41,[[],["m32x4",6]]],[11,"lt","","Lane-wise less-than comparison.",41,[[],["m32x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",41,[[],["m32x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",41,[[],["m32x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",41,[[],["m32x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",41,[[],[["u32x4",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",41,[[],[["u32x4",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",41,[[]]],[11,"new","","Creates a new instance with each vector elements…",42,[[]]],[11,"lanes","","Returns the number of vector lanes.",42,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",42,[[]]],[11,"extract","","Extracts the value at `index`.",42,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",42,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",42,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",42,[[]]],[11,"min","","Minimum of two vectors.",42,[[]]],[11,"max","","Maximum of two vectors.",42,[[]]],[11,"sum","","Horizontal sum of the vector elements.",42,[[]]],[11,"product","","Horizontal product of the vector elements.",42,[[]]],[11,"max_element","","Largest vector element value.",42,[[]]],[11,"min_element","","Smallest vector element value.",42,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",42,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",42,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",42,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",42,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",42,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",42,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to 
the `slice`.",42,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",42,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",42,[[]]],[18,"EPSILON","","Machine epsilon value.",42,null],[18,"MIN","","Smallest finite value.",42,null],[18,"MIN_POSITIVE","","Smallest positive normal value.",42,null],[18,"MAX","","Largest finite value.",42,null],[18,"NAN","","Not a Number (NaN).",42,null],[18,"INFINITY","","Infinity (∞).",42,null],[18,"NEG_INFINITY","","Negative infinity (-∞).",42,null],[18,"PI","","Archimedes\' constant (π)",42,null],[18,"FRAC_PI_2","","π/2",42,null],[18,"FRAC_PI_3","","π/3",42,null],[18,"FRAC_PI_4","","π/4",42,null],[18,"FRAC_PI_6","","π/6",42,null],[18,"FRAC_PI_8","","π/8",42,null],[18,"FRAC_1_PI","","1/π",42,null],[18,"FRAC_2_PI","","2/π",42,null],[18,"FRAC_2_SQRT_PI","","2/sqrt(π)",42,null],[18,"SQRT_2","","sqrt(2)",42,null],[18,"FRAC_1_SQRT_2","","1/sqrt(2)",42,null],[18,"E","","Euler\'s number (e)",42,null],[18,"LOG2_E","","log2(e)",42,null],[18,"LOG10_E","","log10(e)",42,null],[18,"LN_2","","ln(2)",42,null],[18,"LN_10","","ln(10)",42,null],[11,"is_nan","","",42,[[],["m32x4",6]]],[11,"is_infinite","","",42,[[],["m32x4",6]]],[11,"is_finite","","",42,[[],["m32x4",6]]],[11,"abs","","Absolute value.",42,[[]]],[11,"cos","","Cosine.",42,[[]]],[11,"cos_pi","","Cosine of `self * PI`.",42,[[]]],[11,"exp","","Returns the exponential function of `self`: `e^(self)`.",42,[[]]],[11,"ln","","Returns the natural logarithm of `self`.",42,[[]]],[11,"mul_add","","Fused multiply add: `self * y + z`",42,[[]]],[11,"mul_adde","","Fused multiply add estimate: ~= `self * y + z`",42,[[]]],[11,"powf","","Raises `self` number to the floating point power of `x`.",42,[[]]],[11,"recpre","","Reciprocal estimate: `~= 1. / self`.",42,[[]]],[11,"rsqrte","","Reciprocal square-root estimate: `~= 1. 
/ self.sqrt()`.",42,[[]]],[11,"sin","","Sine.",42,[[]]],[11,"sin_pi","","Sine of `self * PI`.",42,[[]]],[11,"sin_cos_pi","","Sine and cosine of `self * PI`.",42,[[]]],[11,"sqrt","","",42,[[]]],[11,"sqrte","","Square-root estimate.",42,[[]]],[11,"tanh","","Tanh.",42,[[]]],[11,"eq","","Lane-wise equality comparison.",42,[[],["m32x4",6]]],[11,"ne","","Lane-wise inequality comparison.",42,[[],["m32x4",6]]],[11,"lt","","Lane-wise less-than comparison.",42,[[],["m32x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",42,[[],["m32x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",42,[[],["m32x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",42,[[],["m32x4",6]]],[11,"new","","Creates a new instance with each vector elements…",43,[[]]],[11,"lanes","","Returns the number of vector lanes.",43,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",43,[[]]],[11,"extract","","Extracts the value at `index`.",43,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",43,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",43,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",43,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",43,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",43,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",43,[[]]],[11,"all","","Are `all` vector lanes `true`?",43,[[]]],[11,"any","","Is `any` vector lane `true`?",43,[[]]],[11,"none","","Are `all` vector lanes `false`?",43,[[]]],[11,"eq","","Lane-wise equality comparison.",43,[[],["m32x4",6]]],[11,"ne","","Lane-wise inequality comparison.",43,[[],["m32x4",6]]],[11,"lt","","Lane-wise less-than comparison.",43,[[],["m32x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",43,[[],["m32x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",43,[[],["m32x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",43,[[],["m32x4",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",43,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",43,[[],[["lexicographicallyordered",3],["m32x4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",43,[[],[["lexicographicallyordered",3],["m32x4",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",43,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",43,[[]]],[11,"new","","Creates a new instance with each vector elements…",44,[[]]],[11,"lanes","","Returns the number of vector lanes.",44,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",44,[[]]],[11,"extract","","Extracts the value at `index`.",44,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",44,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",44,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",44,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",44,[[["i64x2",6]],["i64x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",44,[[["i64x2",6]],["i64x2",6]]],[11,"min","","Minimum of two vectors.",44,[[]]],[11,"max","","Maximum of two vectors.",44,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",44,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector 
elements.",44,[[]]],[11,"max_element","","Largest vector element value.",44,[[]]],[11,"min_element","","Smallest vector element value.",44,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",44,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",44,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",44,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",44,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",44,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",44,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",44,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",44,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",44,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",44,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",44,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",44,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",44,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",44,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",44,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",44,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",44,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",44,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",44,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",44,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",44,[[]]],[11,"eq","","Lane-wise equality comparison.",44,[[],["m64x2",6]]],[11,"ne","","Lane-wise inequality comparison.",44,[[],["m64x2",6]]],[11,"lt","","Lane-wise less-than comparison.",44,[[],["m64x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",44,[[],["m64x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",44,[[],["m64x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",44,[[],["m64x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",44,[[],[["lexicographicallyordered",3],["i64x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",44,[[],[["lexicographicallyordered",3],["i64x2",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",44,[[]]],[11,"new","","Creates a new instance with each vector elements…",45,[[]]],[11,"lanes","","Returns the number of vector lanes.",45,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",45,[[]]],[11,"extract","","Extracts the value at `index`.",45,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",45,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",45,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",45,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",45,[[["u64x2",6]],["u64x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",45,[[["u64x2",6]],["u64x2",6]]],[11,"min","","Minimum of two 
vectors.",45,[[]]],[11,"max","","Maximum of two vectors.",45,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",45,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",45,[[]]],[11,"max_element","","Largest vector element value.",45,[[]]],[11,"min_element","","Smallest vector element value.",45,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",45,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",45,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",45,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",45,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",45,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",45,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",45,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",45,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",45,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",45,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",45,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",45,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",45,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",45,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",45,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",45,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",45,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",45,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",45,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",45,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",45,[[]]],[11,"eq","","Lane-wise equality comparison.",45,[[],["m64x2",6]]],[11,"ne","","Lane-wise inequality comparison.",45,[[],["m64x2",6]]],[11,"lt","","Lane-wise less-than comparison.",45,[[],["m64x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",45,[[],["m64x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",45,[[],["m64x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",45,[[],["m64x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",45,[[],[["lexicographicallyordered",3],["u64x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",45,[[],[["lexicographicallyordered",3],["u64x2",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",45,[[]]],[11,"new","","Creates a new instance with each vector elements…",46,[[]]],[11,"lanes","","Returns the number of vector lanes.",46,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",46,[[]]],[11,"extract","","Extracts the value at `index`.",46,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",46,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",46,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",46,[[]]],[11,"min","","Minimum of two 
vectors.",46,[[]]],[11,"max","","Maximum of two vectors.",46,[[]]],[11,"sum","","Horizontal sum of the vector elements.",46,[[]]],[11,"product","","Horizontal product of the vector elements.",46,[[]]],[11,"max_element","","Largest vector element value.",46,[[]]],[11,"min_element","","Smallest vector element value.",46,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",46,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",46,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",46,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",46,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",46,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",46,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",46,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",46,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",46,[[]]],[18,"EPSILON","","Machine epsilon value.",46,null],[18,"MIN","","Smallest finite value.",46,null],[18,"MIN_POSITIVE","","Smallest positive normal value.",46,null],[18,"MAX","","Largest finite value.",46,null],[18,"NAN","","Not a Number (NaN).",46,null],[18,"INFINITY","","Infinity (∞).",46,null],[18,"NEG_INFINITY","","Negative infinity (-∞).",46,null],[18,"PI","","Archimedes\' constant (π)",46,null],[18,"FRAC_PI_2","","π/2",46,null],[18,"FRAC_PI_3","","π/3",46,null],[18,"FRAC_PI_4","","π/4",46,null],[18,"FRAC_PI_6","","π/6",46,null],[18,"FRAC_PI_8","","π/8",46,null],[18,"FRAC_1_PI","","1/π",46,null],[18,"FRAC_2_PI","","2/π",46,null],[18,"FRAC_2_SQRT_PI","","2/sqrt(π)",46,null],[18,"SQRT_2","","sqrt(2)",46,null],[18,"FRAC_1_SQRT_2","","1/sqrt(2)",46,null],[18,"E","","Euler\'s number (e)",46,null],[18,"LOG2_E","","log2(e)",46,null],[18,"LOG10_E","","log10(e)",46,null],[18,"LN_2","","ln(2)",46,null],[18,"LN_10","","ln(10)",46,null],[11,"is_nan","","",46,[[],["m64x2",6]]],[11,"is_infinite","","",46,[[],["m64x2",6]]],[11,"is_finite","","",46,[[],["m64x2",6]]],[11,"abs","","Absolute value.",46,[[]]],[11,"cos","","Cosine.",46,[[]]],[11,"cos_pi","","Cosine of `self * PI`.",46,[[]]],[11,"exp","","Returns the exponential function of `self`: `e^(self)`.",46,[[]]],[11,"ln","","Returns the natural logarithm of `self`.",46,[[]]],[11,"mul_add","","Fused multiply add: `self * y + z`",46,[[]]],[11,"mul_adde","","Fused multiply add estimate: ~= `self * y + z`",46,[[]]],[11,"powf","","Raises `self` number to the floating point power of `x`.",46,[[]]],[11,"recpre","","Reciprocal estimate: `~= 1. / self`.",46,[[]]],[11,"rsqrte","","Reciprocal square-root estimate: `~= 1. 
/ self.sqrt()`.",46,[[]]],[11,"sin","","Sine.",46,[[]]],[11,"sin_pi","","Sine of `self * PI`.",46,[[]]],[11,"sin_cos_pi","","Sine and cosine of `self * PI`.",46,[[]]],[11,"sqrt","","",46,[[]]],[11,"sqrte","","Square-root estimate.",46,[[]]],[11,"tanh","","Tanh.",46,[[]]],[11,"eq","","Lane-wise equality comparison.",46,[[],["m64x2",6]]],[11,"ne","","Lane-wise inequality comparison.",46,[[],["m64x2",6]]],[11,"lt","","Lane-wise less-than comparison.",46,[[],["m64x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",46,[[],["m64x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",46,[[],["m64x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",46,[[],["m64x2",6]]],[11,"new","","Creates a new instance with each vector elements…",47,[[]]],[11,"lanes","","Returns the number of vector lanes.",47,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",47,[[]]],[11,"extract","","Extracts the value at `index`.",47,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",47,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",47,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",47,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",47,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",47,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",47,[[]]],[11,"all","","Are `all` vector lanes `true`?",47,[[]]],[11,"any","","Is `any` vector lane `true`?",47,[[]]],[11,"none","","Are `all` vector lanes `false`?",47,[[]]],[11,"eq","","Lane-wise equality comparison.",47,[[],["m64x2",6]]],[11,"ne","","Lane-wise inequality comparison.",47,[[],["m64x2",6]]],[11,"lt","","Lane-wise less-than comparison.",47,[[],["m64x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",47,[[],["m64x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",47,[[],["m64x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",47,[[],["m64x2",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",47,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",47,[[],[["lexicographicallyordered",3],["m64x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",47,[[],[["lexicographicallyordered",3],["m64x2",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",47,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",47,[[]]],[11,"new","","Creates a new instance with each vector elements…",51,[[]]],[11,"lanes","","Returns the number of vector lanes.",51,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",51,[[]]],[11,"extract","","Extracts the value at `index`.",51,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",51,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",51,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",51,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",51,[[["i128x1",6]],["i128x1",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",51,[[["i128x1",6]],["i128x1",6]]],[11,"min","","Minimum of two vectors.",51,[[]]],[11,"max","","Maximum of two vectors.",51,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",51,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector 
elements.",51,[[]]],[11,"max_element","","Largest vector element value.",51,[[]]],[11,"min_element","","Smallest vector element value.",51,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",51,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",51,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",51,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",51,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",51,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",51,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",51,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",51,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",51,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",51,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",51,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",51,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",51,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",51,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",51,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",51,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",51,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",51,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",51,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",51,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",51,[[]]],[11,"eq","","Lane-wise equality comparison.",51,[[],["m128x1",6]]],[11,"ne","","Lane-wise inequality comparison.",51,[[],["m128x1",6]]],[11,"lt","","Lane-wise less-than comparison.",51,[[],["m128x1",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",51,[[],["m128x1",6]]],[11,"gt","","Lane-wise greater-than comparison.",51,[[],["m128x1",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",51,[[],["m128x1",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",51,[[],[["i128x1",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",51,[[],[["i128x1",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",51,[[]]],[11,"new","","Creates a new instance with each vector elements…",52,[[]]],[11,"lanes","","Returns the number of vector lanes.",52,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",52,[[]]],[11,"extract","","Extracts the value at `index`.",52,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",52,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",52,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",52,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",52,[[["u128x1",6]],["u128x1",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",52,[[["u128x1",6]],["u128x1",6]]],[11,"min","","Minimum of two 
vectors.",52,[[]]],[11,"max","","Maximum of two vectors.",52,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",52,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",52,[[]]],[11,"max_element","","Largest vector element value.",52,[[]]],[11,"min_element","","Smallest vector element value.",52,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",52,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",52,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",52,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",52,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",52,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",52,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",52,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",52,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",52,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",52,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",52,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",52,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",52,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",52,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",52,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",52,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",52,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",52,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",52,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",52,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",52,[[]]],[11,"eq","","Lane-wise equality comparison.",52,[[],["m128x1",6]]],[11,"ne","","Lane-wise inequality comparison.",52,[[],["m128x1",6]]],[11,"lt","","Lane-wise less-than comparison.",52,[[],["m128x1",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",52,[[],["m128x1",6]]],[11,"gt","","Lane-wise greater-than comparison.",52,[[],["m128x1",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",52,[[],["m128x1",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",52,[[],[["lexicographicallyordered",3],["u128x1",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",52,[[],[["lexicographicallyordered",3],["u128x1",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",52,[[]]],[11,"new","","Creates a new instance with each vector elements…",53,[[]]],[11,"lanes","","Returns the number of vector lanes.",53,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",53,[[]]],[11,"extract","","Extracts the value at `index`.",53,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",53,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",53,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",53,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector 
elements.",53,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",53,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",53,[[]]],[11,"all","","Are `all` vector lanes `true`?",53,[[]]],[11,"any","","Is `any` vector lane `true`?",53,[[]]],[11,"none","","Are `all` vector lanes `false`?",53,[[]]],[11,"eq","","Lane-wise equality comparison.",53,[[],["m128x1",6]]],[11,"ne","","Lane-wise inequality comparison.",53,[[],["m128x1",6]]],[11,"lt","","Lane-wise less-than comparison.",53,[[],["m128x1",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",53,[[],["m128x1",6]]],[11,"gt","","Lane-wise greater-than comparison.",53,[[],["m128x1",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",53,[[],["m128x1",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",53,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",53,[[],[["lexicographicallyordered",3],["m128x1",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",53,[[],[["lexicographicallyordered",3],["m128x1",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",53,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",53,[[]]],[11,"new","","Creates a new instance with each vector elements…",54,[[]]],[11,"lanes","","Returns the number of vector lanes.",54,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",54,[[]]],[11,"extract","","Extracts the value at `index`.",54,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",54,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",54,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",54,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",54,[[["i8x32",6]],["i8x32",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",54,[[["i8x32",6]],["i8x32",6]]],[11,"min","","Minimum of two vectors.",54,[[]]],[11,"max","","Maximum of two vectors.",54,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",54,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",54,[[]]],[11,"max_element","","Largest vector element value.",54,[[]]],[11,"min_element","","Smallest vector element value.",54,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",54,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",54,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",54,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",54,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",54,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",54,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",54,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",54,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",54,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",54,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",54,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",54,[[]]],[11,"to_le","","Converts self to little endian from the target\'s 
endianness.",54,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",54,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",54,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",54,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",54,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",54,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",54,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",54,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",54,[[]]],[11,"eq","","Lane-wise equality comparison.",54,[[],["m8x32",6]]],[11,"ne","","Lane-wise inequality comparison.",54,[[],["m8x32",6]]],[11,"lt","","Lane-wise less-than comparison.",54,[[],["m8x32",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",54,[[],["m8x32",6]]],[11,"gt","","Lane-wise greater-than comparison.",54,[[],["m8x32",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",54,[[],["m8x32",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",54,[[],[["lexicographicallyordered",3],["i8x32",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",54,[[],[["lexicographicallyordered",3],["i8x32",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",54,[[]]],[11,"new","","Creates a new instance with each vector elements…",55,[[]]],[11,"lanes","","Returns the number of vector lanes.",55,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",55,[[]]],[11,"extract","","Extracts the value at `index`.",55,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",55,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",55,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",55,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",55,[[["u8x32",6]],["u8x32",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",55,[[["u8x32",6]],["u8x32",6]]],[11,"min","","Minimum of two vectors.",55,[[]]],[11,"max","","Maximum of two vectors.",55,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",55,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",55,[[]]],[11,"max_element","","Largest vector element value.",55,[[]]],[11,"min_element","","Smallest vector element value.",55,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",55,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",55,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",55,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",55,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",55,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",55,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",55,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",55,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",55,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the 
`slice`.",55,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",55,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",55,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",55,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",55,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",55,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",55,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",55,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",55,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",55,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",55,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",55,[[]]],[11,"eq","","Lane-wise equality comparison.",55,[[],["m8x32",6]]],[11,"ne","","Lane-wise inequality comparison.",55,[[],["m8x32",6]]],[11,"lt","","Lane-wise less-than comparison.",55,[[],["m8x32",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",55,[[],["m8x32",6]]],[11,"gt","","Lane-wise greater-than comparison.",55,[[],["m8x32",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",55,[[],["m8x32",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",55,[[],[["lexicographicallyordered",3],["u8x32",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",55,[[],[["lexicographicallyordered",3],["u8x32",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",55,[[]]],[11,"new","","Creates a new instance with each vector elements…",56,[[]]],[11,"lanes","","Returns the number of vector lanes.",56,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",56,[[]]],[11,"extract","","Extracts the value at `index`.",56,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",56,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",56,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",56,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",56,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",56,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",56,[[]]],[11,"all","","Are `all` vector lanes `true`?",56,[[]]],[11,"any","","Is `any` vector lane `true`?",56,[[]]],[11,"none","","Are `all` vector lanes `false`?",56,[[]]],[11,"eq","","Lane-wise equality comparison.",56,[[],["m8x32",6]]],[11,"ne","","Lane-wise inequality comparison.",56,[[],["m8x32",6]]],[11,"lt","","Lane-wise less-than comparison.",56,[[],["m8x32",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",56,[[],["m8x32",6]]],[11,"gt","","Lane-wise greater-than comparison.",56,[[],["m8x32",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",56,[[],["m8x32",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",56,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",56,[[],[["lexicographicallyordered",3],["m8x32",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",56,[[],[["lexicographicallyordered",3],["m8x32",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",56,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector 
lane.",56,[[]]],[11,"new","","Creates a new instance with each vector elements…",57,[[]]],[11,"lanes","","Returns the number of vector lanes.",57,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",57,[[]]],[11,"extract","","Extracts the value at `index`.",57,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",57,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",57,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",57,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",57,[[["i16x16",6]],["i16x16",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",57,[[["i16x16",6]],["i16x16",6]]],[11,"min","","Minimum of two vectors.",57,[[]]],[11,"max","","Maximum of two vectors.",57,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",57,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",57,[[]]],[11,"max_element","","Largest vector element value.",57,[[]]],[11,"min_element","","Smallest vector element value.",57,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",57,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",57,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",57,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",57,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",57,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",57,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",57,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",57,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",57,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",57,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",57,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",57,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",57,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",57,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",57,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",57,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",57,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",57,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",57,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",57,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",57,[[]]],[11,"eq","","Lane-wise equality comparison.",57,[[],["m16x16",6]]],[11,"ne","","Lane-wise inequality comparison.",57,[[],["m16x16",6]]],[11,"lt","","Lane-wise less-than comparison.",57,[[],["m16x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",57,[[],["m16x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",57,[[],["m16x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",57,[[],["m16x16",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements 
`PartialOrd`.",57,[[],[["lexicographicallyordered",3],["i16x16",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",57,[[],[["lexicographicallyordered",3],["i16x16",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",57,[[]]],[11,"new","","Creates a new instance with each vector elements…",58,[[]]],[11,"lanes","","Returns the number of vector lanes.",58,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",58,[[]]],[11,"extract","","Extracts the value at `index`.",58,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",58,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",58,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",58,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",58,[[["u16x16",6]],["u16x16",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",58,[[["u16x16",6]],["u16x16",6]]],[11,"min","","Minimum of two vectors.",58,[[]]],[11,"max","","Maximum of two vectors.",58,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",58,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",58,[[]]],[11,"max_element","","Largest vector element value.",58,[[]]],[11,"min_element","","Smallest vector element value.",58,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",58,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",58,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",58,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",58,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",58,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",58,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",58,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",58,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",58,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",58,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",58,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",58,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",58,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",58,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",58,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",58,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",58,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",58,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",58,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",58,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",58,[[]]],[11,"eq","","Lane-wise equality comparison.",58,[[],["m16x16",6]]],[11,"ne","","Lane-wise inequality comparison.",58,[[],["m16x16",6]]],[11,"lt","","Lane-wise less-than comparison.",58,[[],["m16x16",6]]],[11,"le","","Lane-wise less-than-or-equals 
comparison.",58,[[],["m16x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",58,[[],["m16x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",58,[[],["m16x16",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",58,[[],[["lexicographicallyordered",3],["u16x16",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",58,[[],[["lexicographicallyordered",3],["u16x16",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",58,[[]]],[11,"new","","Creates a new instance with each vector elements…",59,[[]]],[11,"lanes","","Returns the number of vector lanes.",59,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",59,[[]]],[11,"extract","","Extracts the value at `index`.",59,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",59,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",59,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",59,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",59,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",59,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",59,[[]]],[11,"all","","Are `all` vector lanes `true`?",59,[[]]],[11,"any","","Is `any` vector lane `true`?",59,[[]]],[11,"none","","Are `all` vector lanes `false`?",59,[[]]],[11,"eq","","Lane-wise equality comparison.",59,[[],["m16x16",6]]],[11,"ne","","Lane-wise inequality comparison.",59,[[],["m16x16",6]]],[11,"lt","","Lane-wise less-than comparison.",59,[[],["m16x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",59,[[],["m16x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",59,[[],["m16x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",59,[[],["m16x16",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",59,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",59,[[],[["lexicographicallyordered",3],["m16x16",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",59,[[],[["lexicographicallyordered",3],["m16x16",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",59,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",59,[[]]],[11,"new","","Creates a new instance with each vector elements…",60,[[]]],[11,"lanes","","Returns the number of vector lanes.",60,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",60,[[]]],[11,"extract","","Extracts the value at `index`.",60,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",60,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",60,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",60,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",60,[[["i32x8",6]],["i32x8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",60,[[["i32x8",6]],["i32x8",6]]],[11,"min","","Minimum of two vectors.",60,[[]]],[11,"max","","Maximum of two vectors.",60,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",60,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",60,[[]]],[11,"max_element","","Largest vector element value.",60,[[]]],[11,"min_element","","Smallest vector element value.",60,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector 
elements.",60,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",60,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",60,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",60,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",60,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",60,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",60,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",60,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",60,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",60,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",60,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",60,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",60,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",60,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",60,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",60,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",60,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",60,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",60,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",60,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",60,[[]]],[11,"eq","","Lane-wise equality comparison.",60,[[],["m32x8",6]]],[11,"ne","","Lane-wise inequality comparison.",60,[[],["m32x8",6]]],[11,"lt","","Lane-wise less-than comparison.",60,[[],["m32x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",60,[[],["m32x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",60,[[],["m32x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",60,[[],["m32x8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",60,[[],[["lexicographicallyordered",3],["i32x8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",60,[[],[["lexicographicallyordered",3],["i32x8",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",60,[[]]],[11,"new","","Creates a new instance with each vector elements…",61,[[]]],[11,"lanes","","Returns the number of vector lanes.",61,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",61,[[]]],[11,"extract","","Extracts the value at `index`.",61,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",61,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",61,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",61,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",61,[[["u32x8",6]],["u32x8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",61,[[["u32x8",6]],["u32x8",6]]],[11,"min","","Minimum of two vectors.",61,[[]]],[11,"max","","Maximum of two vectors.",61,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",61,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector 
elements.",61,[[]]],[11,"max_element","","Largest vector element value.",61,[[]]],[11,"min_element","","Smallest vector element value.",61,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",61,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",61,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",61,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",61,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",61,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",61,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",61,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",61,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",61,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",61,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",61,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",61,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",61,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",61,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",61,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",61,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",61,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",61,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",61,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",61,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",61,[[]]],[11,"eq","","Lane-wise equality comparison.",61,[[],["m32x8",6]]],[11,"ne","","Lane-wise inequality comparison.",61,[[],["m32x8",6]]],[11,"lt","","Lane-wise less-than comparison.",61,[[],["m32x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",61,[[],["m32x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",61,[[],["m32x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",61,[[],["m32x8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",61,[[],[["u32x8",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",61,[[],[["u32x8",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",61,[[]]],[11,"new","","Creates a new instance with each vector elements…",62,[[]]],[11,"lanes","","Returns the number of vector lanes.",62,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",62,[[]]],[11,"extract","","Extracts the value at `index`.",62,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",62,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",62,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",62,[[]]],[11,"min","","Minimum of two vectors.",62,[[]]],[11,"max","","Maximum of two vectors.",62,[[]]],[11,"sum","","Horizontal sum of the vector elements.",62,[[]]],[11,"product","","Horizontal product of the vector elements.",62,[[]]],[11,"max_element","","Largest vector element 
value.",62,[[]]],[11,"min_element","","Smallest vector element value.",62,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",62,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",62,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",62,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",62,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",62,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",62,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",62,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",62,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",62,[[]]],[18,"EPSILON","","Machine epsilon value.",62,null],[18,"MIN","","Smallest finite value.",62,null],[18,"MIN_POSITIVE","","Smallest positive normal value.",62,null],[18,"MAX","","Largest finite value.",62,null],[18,"NAN","","Not a Number (NaN).",62,null],[18,"INFINITY","","Infinity (∞).",62,null],[18,"NEG_INFINITY","","Negative infinity (-∞).",62,null],[18,"PI","","Archimedes\' constant (π)",62,null],[18,"FRAC_PI_2","","π/2",62,null],[18,"FRAC_PI_3","","π/3",62,null],[18,"FRAC_PI_4","","π/4",62,null],[18,"FRAC_PI_6","","π/6",62,null],[18,"FRAC_PI_8","","π/8",62,null],[18,"FRAC_1_PI","","1/π",62,null],[18,"FRAC_2_PI","","2/π",62,null],[18,"FRAC_2_SQRT_PI","","2/sqrt(π)",62,null],[18,"SQRT_2","","sqrt(2)",62,null],[18,"FRAC_1_SQRT_2","","1/sqrt(2)",62,null],[18,"E","","Euler\'s number (e)",62,null],[18,"LOG2_E","","log2(e)",62,null],[18,"LOG10_E","","log10(e)",62,null],[18,"LN_2","","ln(2)",62,null],[18,"LN_10","","ln(10)",62,null],[11,"is_nan","","",62,[[],["m32x8",6]]],[11,"is_infinite","","",62,[[],["m32x8",6]]],[11,"is_finite","","",62,[[],["m32x8",6]]],[11,"abs","","Absolute value.",62,[[]]],[11,"cos","","Cosine.",62,[[]]],[11,"cos_pi","","Cosine of `self * PI`.",62,[[]]],[11,"exp","","Returns the exponential function of `self`: `e^(self)`.",62,[[]]],[11,"ln","","Returns the natural logarithm of `self`.",62,[[]]],[11,"mul_add","","Fused multiply add: `self * y + z`",62,[[]]],[11,"mul_adde","","Fused multiply add estimate: ~= `self * y + z`",62,[[]]],[11,"powf","","Raises `self` number to the floating point power of `x`.",62,[[]]],[11,"recpre","","Reciprocal estimate: `~= 1. / self`.",62,[[]]],[11,"rsqrte","","Reciprocal square-root estimate: `~= 1. 
/ self.sqrt()`.",62,[[]]],[11,"sin","","Sine.",62,[[]]],[11,"sin_pi","","Sine of `self * PI`.",62,[[]]],[11,"sin_cos_pi","","Sine and cosine of `self * PI`.",62,[[]]],[11,"sqrt","","",62,[[]]],[11,"sqrte","","Square-root estimate.",62,[[]]],[11,"tanh","","Tanh.",62,[[]]],[11,"eq","","Lane-wise equality comparison.",62,[[],["m32x8",6]]],[11,"ne","","Lane-wise inequality comparison.",62,[[],["m32x8",6]]],[11,"lt","","Lane-wise less-than comparison.",62,[[],["m32x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",62,[[],["m32x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",62,[[],["m32x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",62,[[],["m32x8",6]]],[11,"new","","Creates a new instance with each vector elements…",63,[[]]],[11,"lanes","","Returns the number of vector lanes.",63,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",63,[[]]],[11,"extract","","Extracts the value at `index`.",63,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",63,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",63,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",63,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",63,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",63,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",63,[[]]],[11,"all","","Are `all` vector lanes `true`?",63,[[]]],[11,"any","","Is `any` vector lane `true`?",63,[[]]],[11,"none","","Are `all` vector lanes `false`?",63,[[]]],[11,"eq","","Lane-wise equality comparison.",63,[[],["m32x8",6]]],[11,"ne","","Lane-wise inequality comparison.",63,[[],["m32x8",6]]],[11,"lt","","Lane-wise less-than comparison.",63,[[],["m32x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",63,[[],["m32x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",63,[[],["m32x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",63,[[],["m32x8",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",63,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",63,[[],[["lexicographicallyordered",3],["m32x8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",63,[[],[["lexicographicallyordered",3],["m32x8",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",63,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",63,[[]]],[11,"new","","Creates a new instance with each vector elements…",64,[[]]],[11,"lanes","","Returns the number of vector lanes.",64,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",64,[[]]],[11,"extract","","Extracts the value at `index`.",64,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",64,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",64,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",64,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",64,[[["i64x4",6]],["i64x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",64,[[["i64x4",6]],["i64x4",6]]],[11,"min","","Minimum of two vectors.",64,[[]]],[11,"max","","Maximum of two vectors.",64,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",64,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector 
elements.",64,[[]]],[11,"max_element","","Largest vector element value.",64,[[]]],[11,"min_element","","Smallest vector element value.",64,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",64,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",64,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",64,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",64,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",64,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",64,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",64,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",64,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",64,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",64,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",64,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",64,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",64,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",64,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",64,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",64,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",64,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",64,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",64,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",64,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",64,[[]]],[11,"eq","","Lane-wise equality comparison.",64,[[],["m64x4",6]]],[11,"ne","","Lane-wise inequality comparison.",64,[[],["m64x4",6]]],[11,"lt","","Lane-wise less-than comparison.",64,[[],["m64x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",64,[[],["m64x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",64,[[],["m64x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",64,[[],["m64x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",64,[[],[["lexicographicallyordered",3],["i64x4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",64,[[],[["lexicographicallyordered",3],["i64x4",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",64,[[]]],[11,"new","","Creates a new instance with each vector elements…",65,[[]]],[11,"lanes","","Returns the number of vector lanes.",65,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",65,[[]]],[11,"extract","","Extracts the value at `index`.",65,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",65,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",65,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",65,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",65,[[["u64x4",6]],["u64x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",65,[[["u64x4",6]],["u64x4",6]]],[11,"min","","Minimum of two 
vectors.",65,[[]]],[11,"max","","Maximum of two vectors.",65,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",65,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",65,[[]]],[11,"max_element","","Largest vector element value.",65,[[]]],[11,"min_element","","Smallest vector element value.",65,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",65,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",65,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",65,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",65,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",65,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",65,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",65,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",65,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",65,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",65,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",65,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",65,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",65,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",65,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",65,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",65,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",65,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",65,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",65,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",65,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",65,[[]]],[11,"eq","","Lane-wise equality comparison.",65,[[],["m64x4",6]]],[11,"ne","","Lane-wise inequality comparison.",65,[[],["m64x4",6]]],[11,"lt","","Lane-wise less-than comparison.",65,[[],["m64x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",65,[[],["m64x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",65,[[],["m64x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",65,[[],["m64x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",65,[[],[["lexicographicallyordered",3],["u64x4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",65,[[],[["lexicographicallyordered",3],["u64x4",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",65,[[]]],[11,"new","","Creates a new instance with each vector elements…",66,[[]]],[11,"lanes","","Returns the number of vector lanes.",66,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",66,[[]]],[11,"extract","","Extracts the value at `index`.",66,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",66,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",66,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",66,[[]]],[11,"min","","Minimum of two 
vectors.",66,[[]]],[11,"max","","Maximum of two vectors.",66,[[]]],[11,"sum","","Horizontal sum of the vector elements.",66,[[]]],[11,"product","","Horizontal product of the vector elements.",66,[[]]],[11,"max_element","","Largest vector element value.",66,[[]]],[11,"min_element","","Smallest vector element value.",66,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",66,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",66,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",66,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",66,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",66,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",66,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",66,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",66,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",66,[[]]],[18,"EPSILON","","Machine epsilon value.",66,null],[18,"MIN","","Smallest finite value.",66,null],[18,"MIN_POSITIVE","","Smallest positive normal value.",66,null],[18,"MAX","","Largest finite value.",66,null],[18,"NAN","","Not a Number (NaN).",66,null],[18,"INFINITY","","Infinity (∞).",66,null],[18,"NEG_INFINITY","","Negative infinity (-∞).",66,null],[18,"PI","","Archimedes\' constant (π)",66,null],[18,"FRAC_PI_2","","π/2",66,null],[18,"FRAC_PI_3","","π/3",66,null],[18,"FRAC_PI_4","","π/4",66,null],[18,"FRAC_PI_6","","π/6",66,null],[18,"FRAC_PI_8","","π/8",66,null],[18,"FRAC_1_PI","","1/π",66,null],[18,"FRAC_2_PI","","2/π",66,null],[18,"FRAC_2_SQRT_PI","","2/sqrt(π)",66,null],[18,"SQRT_2","","sqrt(2)",66,null],[18,"FRAC_1_SQRT_2","","1/sqrt(2)",66,null],[18,"E","","Euler\'s number (e)",66,null],[18,"LOG2_E","","log2(e)",66,null],[18,"LOG10_E","","log10(e)",66,null],[18,"LN_2","","ln(2)",66,null],[18,"LN_10","","ln(10)",66,null],[11,"is_nan","","",66,[[],["m64x4",6]]],[11,"is_infinite","","",66,[[],["m64x4",6]]],[11,"is_finite","","",66,[[],["m64x4",6]]],[11,"abs","","Absolute value.",66,[[]]],[11,"cos","","Cosine.",66,[[]]],[11,"cos_pi","","Cosine of `self * PI`.",66,[[]]],[11,"exp","","Returns the exponential function of `self`: `e^(self)`.",66,[[]]],[11,"ln","","Returns the natural logarithm of `self`.",66,[[]]],[11,"mul_add","","Fused multiply add: `self * y + z`",66,[[]]],[11,"mul_adde","","Fused multiply add estimate: ~= `self * y + z`",66,[[]]],[11,"powf","","Raises `self` number to the floating point power of `x`.",66,[[]]],[11,"recpre","","Reciprocal estimate: `~= 1. / self`.",66,[[]]],[11,"rsqrte","","Reciprocal square-root estimate: `~= 1. 
/ self.sqrt()`.",66,[[]]],[11,"sin","","Sine.",66,[[]]],[11,"sin_pi","","Sine of `self * PI`.",66,[[]]],[11,"sin_cos_pi","","Sine and cosine of `self * PI`.",66,[[]]],[11,"sqrt","","",66,[[]]],[11,"sqrte","","Square-root estimate.",66,[[]]],[11,"tanh","","Tanh.",66,[[]]],[11,"eq","","Lane-wise equality comparison.",66,[[],["m64x4",6]]],[11,"ne","","Lane-wise inequality comparison.",66,[[],["m64x4",6]]],[11,"lt","","Lane-wise less-than comparison.",66,[[],["m64x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",66,[[],["m64x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",66,[[],["m64x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",66,[[],["m64x4",6]]],[11,"new","","Creates a new instance with each vector elements…",67,[[]]],[11,"lanes","","Returns the number of vector lanes.",67,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",67,[[]]],[11,"extract","","Extracts the value at `index`.",67,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",67,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",67,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",67,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",67,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",67,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",67,[[]]],[11,"all","","Are `all` vector lanes `true`?",67,[[]]],[11,"any","","Is `any` vector lane `true`?",67,[[]]],[11,"none","","Are `all` vector lanes `false`?",67,[[]]],[11,"eq","","Lane-wise equality comparison.",67,[[],["m64x4",6]]],[11,"ne","","Lane-wise inequality comparison.",67,[[],["m64x4",6]]],[11,"lt","","Lane-wise less-than comparison.",67,[[],["m64x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",67,[[],["m64x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",67,[[],["m64x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",67,[[],["m64x4",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",67,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",67,[[],[["m64x4",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",67,[[],[["m64x4",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",67,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",67,[[]]],[11,"new","","Creates a new instance with each vector elements…",68,[[]]],[11,"lanes","","Returns the number of vector lanes.",68,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",68,[[]]],[11,"extract","","Extracts the value at `index`.",68,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",68,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",68,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",68,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",68,[[["i128x2",6]],["i128x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",68,[[["i128x2",6]],["i128x2",6]]],[11,"min","","Minimum of two vectors.",68,[[]]],[11,"max","","Maximum of two vectors.",68,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",68,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector 
elements.",68,[[]]],[11,"max_element","","Largest vector element value.",68,[[]]],[11,"min_element","","Smallest vector element value.",68,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",68,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",68,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",68,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",68,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",68,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",68,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",68,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",68,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",68,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",68,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",68,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",68,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",68,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",68,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",68,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",68,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",68,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",68,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",68,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",68,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",68,[[]]],[11,"eq","","Lane-wise equality comparison.",68,[[],["m128x2",6]]],[11,"ne","","Lane-wise inequality comparison.",68,[[],["m128x2",6]]],[11,"lt","","Lane-wise less-than comparison.",68,[[],["m128x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",68,[[],["m128x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",68,[[],["m128x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",68,[[],["m128x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",68,[[],[["lexicographicallyordered",3],["i128x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",68,[[],[["lexicographicallyordered",3],["i128x2",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",68,[[]]],[11,"new","","Creates a new instance with each vector elements…",69,[[]]],[11,"lanes","","Returns the number of vector lanes.",69,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",69,[[]]],[11,"extract","","Extracts the value at `index`.",69,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",69,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",69,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",69,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",69,[[["u128x2",6]],["u128x2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",69,[[["u128x2",6]],["u128x2",6]]],[11,"min","","Minimum of two 
vectors.",69,[[]]],[11,"max","","Maximum of two vectors.",69,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",69,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",69,[[]]],[11,"max_element","","Largest vector element value.",69,[[]]],[11,"min_element","","Smallest vector element value.",69,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",69,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",69,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",69,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",69,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",69,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",69,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",69,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",69,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",69,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",69,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",69,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",69,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",69,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",69,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",69,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",69,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",69,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",69,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",69,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",69,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",69,[[]]],[11,"eq","","Lane-wise equality comparison.",69,[[],["m128x2",6]]],[11,"ne","","Lane-wise inequality comparison.",69,[[],["m128x2",6]]],[11,"lt","","Lane-wise less-than comparison.",69,[[],["m128x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",69,[[],["m128x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",69,[[],["m128x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",69,[[],["m128x2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",69,[[],[["lexicographicallyordered",3],["u128x2",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",69,[[],[["lexicographicallyordered",3],["u128x2",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",69,[[]]],[11,"new","","Creates a new instance with each vector elements…",70,[[]]],[11,"lanes","","Returns the number of vector lanes.",70,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",70,[[]]],[11,"extract","","Extracts the value at `index`.",70,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",70,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",70,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",70,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector 
elements.",70,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",70,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",70,[[]]],[11,"all","","Are `all` vector lanes `true`?",70,[[]]],[11,"any","","Is `any` vector lane `true`?",70,[[]]],[11,"none","","Are `all` vector lanes `false`?",70,[[]]],[11,"eq","","Lane-wise equality comparison.",70,[[],["m128x2",6]]],[11,"ne","","Lane-wise inequality comparison.",70,[[],["m128x2",6]]],[11,"lt","","Lane-wise less-than comparison.",70,[[],["m128x2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",70,[[],["m128x2",6]]],[11,"gt","","Lane-wise greater-than comparison.",70,[[],["m128x2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",70,[[],["m128x2",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",70,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",70,[[],[["m128x2",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",70,[[],[["m128x2",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",70,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",70,[[]]],[11,"new","","Creates a new instance with each vector elements…",74,[[]]],[11,"lanes","","Returns the number of vector lanes.",74,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",74,[[]]],[11,"extract","","Extracts the value at `index`.",74,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",74,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",74,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",74,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",74,[[["i8x64",6]],["i8x64",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",74,[[["i8x64",6]],["i8x64",6]]],[11,"min","","Minimum of two vectors.",74,[[]]],[11,"max","","Maximum of two vectors.",74,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",74,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",74,[[]]],[11,"max_element","","Largest vector element value.",74,[[]]],[11,"min_element","","Smallest vector element value.",74,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",74,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",74,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",74,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",74,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",74,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",74,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",74,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",74,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",74,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",74,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",74,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",74,[[]]],[11,"to_le","","Converts self to little endian from the target\'s 
endianness.",74,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",74,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",74,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",74,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",74,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",74,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",74,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",74,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",74,[[]]],[11,"eq","","Lane-wise equality comparison.",74,[[],["m8x64",6]]],[11,"ne","","Lane-wise inequality comparison.",74,[[],["m8x64",6]]],[11,"lt","","Lane-wise less-than comparison.",74,[[],["m8x64",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",74,[[],["m8x64",6]]],[11,"gt","","Lane-wise greater-than comparison.",74,[[],["m8x64",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",74,[[],["m8x64",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",74,[[],[["i8x64",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",74,[[],[["i8x64",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",74,[[]]],[11,"new","","Creates a new instance with each vector elements…",75,[[]]],[11,"lanes","","Returns the number of vector lanes.",75,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",75,[[]]],[11,"extract","","Extracts the value at `index`.",75,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",75,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",75,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",75,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",75,[[["u8x64",6]],["u8x64",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",75,[[["u8x64",6]],["u8x64",6]]],[11,"min","","Minimum of two vectors.",75,[[]]],[11,"max","","Maximum of two vectors.",75,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",75,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",75,[[]]],[11,"max_element","","Largest vector element value.",75,[[]]],[11,"min_element","","Smallest vector element value.",75,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",75,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",75,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",75,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",75,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",75,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",75,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",75,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",75,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",75,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the 
`slice`.",75,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",75,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",75,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",75,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",75,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",75,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",75,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",75,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",75,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",75,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",75,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",75,[[]]],[11,"eq","","Lane-wise equality comparison.",75,[[],["m8x64",6]]],[11,"ne","","Lane-wise inequality comparison.",75,[[],["m8x64",6]]],[11,"lt","","Lane-wise less-than comparison.",75,[[],["m8x64",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",75,[[],["m8x64",6]]],[11,"gt","","Lane-wise greater-than comparison.",75,[[],["m8x64",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",75,[[],["m8x64",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",75,[[],[["u8x64",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",75,[[],[["u8x64",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",75,[[]]],[11,"new","","Creates a new instance with each vector elements…",76,[[]]],[11,"lanes","","Returns the number of vector lanes.",76,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",76,[[]]],[11,"extract","","Extracts the value at `index`.",76,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",76,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",76,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",76,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",76,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",76,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",76,[[]]],[11,"all","","Are `all` vector lanes `true`?",76,[[]]],[11,"any","","Is `any` vector lane `true`?",76,[[]]],[11,"none","","Are `all` vector lanes `false`?",76,[[]]],[11,"eq","","Lane-wise equality comparison.",76,[[],["m8x64",6]]],[11,"ne","","Lane-wise inequality comparison.",76,[[],["m8x64",6]]],[11,"lt","","Lane-wise less-than comparison.",76,[[],["m8x64",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",76,[[],["m8x64",6]]],[11,"gt","","Lane-wise greater-than comparison.",76,[[],["m8x64",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",76,[[],["m8x64",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",76,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",76,[[],[["m8x64",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",76,[[],[["m8x64",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",76,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector 
lane.",76,[[]]],[11,"new","","Creates a new instance with each vector elements…",77,[[]]],[11,"lanes","","Returns the number of vector lanes.",77,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",77,[[]]],[11,"extract","","Extracts the value at `index`.",77,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",77,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",77,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",77,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",77,[[["i16x32",6]],["i16x32",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",77,[[["i16x32",6]],["i16x32",6]]],[11,"min","","Minimum of two vectors.",77,[[]]],[11,"max","","Maximum of two vectors.",77,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",77,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",77,[[]]],[11,"max_element","","Largest vector element value.",77,[[]]],[11,"min_element","","Smallest vector element value.",77,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",77,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",77,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",77,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",77,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",77,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",77,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",77,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",77,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",77,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",77,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",77,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",77,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",77,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",77,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",77,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",77,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",77,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",77,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",77,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",77,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",77,[[]]],[11,"eq","","Lane-wise equality comparison.",77,[[],["m16x32",6]]],[11,"ne","","Lane-wise inequality comparison.",77,[[],["m16x32",6]]],[11,"lt","","Lane-wise less-than comparison.",77,[[],["m16x32",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",77,[[],["m16x32",6]]],[11,"gt","","Lane-wise greater-than comparison.",77,[[],["m16x32",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",77,[[],["m16x32",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements 
`PartialOrd`.",77,[[],[["i16x32",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",77,[[],[["i16x32",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",77,[[]]],[11,"new","","Creates a new instance with each vector elements…",78,[[]]],[11,"lanes","","Returns the number of vector lanes.",78,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",78,[[]]],[11,"extract","","Extracts the value at `index`.",78,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",78,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",78,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",78,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",78,[[["u16x32",6]],["u16x32",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",78,[[["u16x32",6]],["u16x32",6]]],[11,"min","","Minimum of two vectors.",78,[[]]],[11,"max","","Maximum of two vectors.",78,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",78,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",78,[[]]],[11,"max_element","","Largest vector element value.",78,[[]]],[11,"min_element","","Smallest vector element value.",78,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",78,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",78,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",78,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",78,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",78,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",78,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",78,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",78,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",78,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",78,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",78,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",78,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",78,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",78,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",78,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",78,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",78,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",78,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",78,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",78,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",78,[[]]],[11,"eq","","Lane-wise equality comparison.",78,[[],["m16x32",6]]],[11,"ne","","Lane-wise inequality comparison.",78,[[],["m16x32",6]]],[11,"lt","","Lane-wise less-than comparison.",78,[[],["m16x32",6]]],[11,"le","","Lane-wise less-than-or-equals 
comparison.",78,[[],["m16x32",6]]],[11,"gt","","Lane-wise greater-than comparison.",78,[[],["m16x32",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",78,[[],["m16x32",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",78,[[],[["u16x32",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",78,[[],[["u16x32",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",78,[[]]],[11,"new","","Creates a new instance with each vector elements…",79,[[]]],[11,"lanes","","Returns the number of vector lanes.",79,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",79,[[]]],[11,"extract","","Extracts the value at `index`.",79,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",79,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",79,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",79,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",79,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",79,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",79,[[]]],[11,"all","","Are `all` vector lanes `true`?",79,[[]]],[11,"any","","Is `any` vector lane `true`?",79,[[]]],[11,"none","","Are `all` vector lanes `false`?",79,[[]]],[11,"eq","","Lane-wise equality comparison.",79,[[],["m16x32",6]]],[11,"ne","","Lane-wise inequality comparison.",79,[[],["m16x32",6]]],[11,"lt","","Lane-wise less-than comparison.",79,[[],["m16x32",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",79,[[],["m16x32",6]]],[11,"gt","","Lane-wise greater-than comparison.",79,[[],["m16x32",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",79,[[],["m16x32",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",79,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",79,[[],[["m16x32",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",79,[[],[["m16x32",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",79,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",79,[[]]],[11,"new","","Creates a new instance with each vector elements…",80,[[]]],[11,"lanes","","Returns the number of vector lanes.",80,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",80,[[]]],[11,"extract","","Extracts the value at `index`.",80,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",80,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",80,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",80,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",80,[[["i32x16",6]],["i32x16",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",80,[[["i32x16",6]],["i32x16",6]]],[11,"min","","Minimum of two vectors.",80,[[]]],[11,"max","","Maximum of two vectors.",80,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",80,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",80,[[]]],[11,"max_element","","Largest vector element value.",80,[[]]],[11,"min_element","","Smallest vector element value.",80,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector 
elements.",80,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",80,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",80,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",80,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",80,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",80,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",80,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",80,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",80,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",80,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",80,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",80,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",80,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",80,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",80,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",80,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",80,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",80,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",80,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",80,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",80,[[]]],[11,"eq","","Lane-wise equality comparison.",80,[[],["m32x16",6]]],[11,"ne","","Lane-wise inequality comparison.",80,[[],["m32x16",6]]],[11,"lt","","Lane-wise less-than comparison.",80,[[],["m32x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",80,[[],["m32x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",80,[[],["m32x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",80,[[],["m32x16",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",80,[[],[["i32x16",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",80,[[],[["i32x16",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",80,[[]]],[11,"new","","Creates a new instance with each vector elements…",81,[[]]],[11,"lanes","","Returns the number of vector lanes.",81,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",81,[[]]],[11,"extract","","Extracts the value at `index`.",81,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",81,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",81,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",81,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",81,[[["u32x16",6]],["u32x16",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",81,[[["u32x16",6]],["u32x16",6]]],[11,"min","","Minimum of two vectors.",81,[[]]],[11,"max","","Maximum of two vectors.",81,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",81,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the 
vector elements.",81,[[]]],[11,"max_element","","Largest vector element value.",81,[[]]],[11,"min_element","","Smallest vector element value.",81,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",81,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",81,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",81,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",81,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",81,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",81,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",81,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",81,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",81,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",81,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",81,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",81,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",81,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",81,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",81,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",81,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",81,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",81,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",81,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",81,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",81,[[]]],[11,"eq","","Lane-wise equality comparison.",81,[[],["m32x16",6]]],[11,"ne","","Lane-wise inequality comparison.",81,[[],["m32x16",6]]],[11,"lt","","Lane-wise less-than comparison.",81,[[],["m32x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",81,[[],["m32x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",81,[[],["m32x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",81,[[],["m32x16",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",81,[[],[["lexicographicallyordered",3],["u32x16",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",81,[[],[["lexicographicallyordered",3],["u32x16",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",81,[[]]],[11,"new","","Creates a new instance with each vector elements…",82,[[]]],[11,"lanes","","Returns the number of vector lanes.",82,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",82,[[]]],[11,"extract","","Extracts the value at `index`.",82,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",82,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",82,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",82,[[]]],[11,"min","","Minimum of two vectors.",82,[[]]],[11,"max","","Maximum of two vectors.",82,[[]]],[11,"sum","","Horizontal sum of the vector elements.",82,[[]]],[11,"product","","Horizontal product of the vector elements.",82,[[]]],[11,"max_element","","Largest vector 
element value.",82,[[]]],[11,"min_element","","Smallest vector element value.",82,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",82,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",82,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",82,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",82,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",82,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",82,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",82,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",82,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",82,[[]]],[18,"EPSILON","","Machine epsilon value.",82,null],[18,"MIN","","Smallest finite value.",82,null],[18,"MIN_POSITIVE","","Smallest positive normal value.",82,null],[18,"MAX","","Largest finite value.",82,null],[18,"NAN","","Not a Number (NaN).",82,null],[18,"INFINITY","","Infinity (∞).",82,null],[18,"NEG_INFINITY","","Negative infinity (-∞).",82,null],[18,"PI","","Archimedes\' constant (π)",82,null],[18,"FRAC_PI_2","","π/2",82,null],[18,"FRAC_PI_3","","π/3",82,null],[18,"FRAC_PI_4","","π/4",82,null],[18,"FRAC_PI_6","","π/6",82,null],[18,"FRAC_PI_8","","π/8",82,null],[18,"FRAC_1_PI","","1/π",82,null],[18,"FRAC_2_PI","","2/π",82,null],[18,"FRAC_2_SQRT_PI","","2/sqrt(π)",82,null],[18,"SQRT_2","","sqrt(2)",82,null],[18,"FRAC_1_SQRT_2","","1/sqrt(2)",82,null],[18,"E","","Euler\'s number (e)",82,null],[18,"LOG2_E","","log2(e)",82,null],[18,"LOG10_E","","log10(e)",82,null],[18,"LN_2","","ln(2)",82,null],[18,"LN_10","","ln(10)",82,null],[11,"is_nan","","",82,[[],["m32x16",6]]],[11,"is_infinite","","",82,[[],["m32x16",6]]],[11,"is_finite","","",82,[[],["m32x16",6]]],[11,"abs","","Absolute value.",82,[[]]],[11,"cos","","Cosine.",82,[[]]],[11,"cos_pi","","Cosine of `self * PI`.",82,[[]]],[11,"exp","","Returns the exponential function of `self`: `e^(self)`.",82,[[]]],[11,"ln","","Returns the natural logarithm of `self`.",82,[[]]],[11,"mul_add","","Fused multiply add: `self * y + z`",82,[[]]],[11,"mul_adde","","Fused multiply add estimate: ~= `self * y + z`",82,[[]]],[11,"powf","","Raises `self` number to the floating point power of `x`.",82,[[]]],[11,"recpre","","Reciprocal estimate: `~= 1. / self`.",82,[[]]],[11,"rsqrte","","Reciprocal square-root estimate: `~= 1. 
/ self.sqrt()`.",82,[[]]],[11,"sin","","Sine.",82,[[]]],[11,"sin_pi","","Sine of `self * PI`.",82,[[]]],[11,"sin_cos_pi","","Sine and cosine of `self * PI`.",82,[[]]],[11,"sqrt","","",82,[[]]],[11,"sqrte","","Square-root estimate.",82,[[]]],[11,"tanh","","Tanh.",82,[[]]],[11,"eq","","Lane-wise equality comparison.",82,[[],["m32x16",6]]],[11,"ne","","Lane-wise inequality comparison.",82,[[],["m32x16",6]]],[11,"lt","","Lane-wise less-than comparison.",82,[[],["m32x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",82,[[],["m32x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",82,[[],["m32x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",82,[[],["m32x16",6]]],[11,"new","","Creates a new instance with each vector elements…",83,[[]]],[11,"lanes","","Returns the number of vector lanes.",83,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",83,[[]]],[11,"extract","","Extracts the value at `index`.",83,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",83,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",83,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",83,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",83,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",83,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",83,[[]]],[11,"all","","Are `all` vector lanes `true`?",83,[[]]],[11,"any","","Is `any` vector lane `true`?",83,[[]]],[11,"none","","Are `all` vector lanes `false`?",83,[[]]],[11,"eq","","Lane-wise equality comparison.",83,[[],["m32x16",6]]],[11,"ne","","Lane-wise inequality comparison.",83,[[],["m32x16",6]]],[11,"lt","","Lane-wise less-than comparison.",83,[[],["m32x16",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",83,[[],["m32x16",6]]],[11,"gt","","Lane-wise greater-than comparison.",83,[[],["m32x16",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",83,[[],["m32x16",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",83,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",83,[[],[["lexicographicallyordered",3],["m32x16",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",83,[[],[["lexicographicallyordered",3],["m32x16",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",83,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",83,[[]]],[11,"new","","Creates a new instance with each vector elements…",84,[[]]],[11,"lanes","","Returns the number of vector lanes.",84,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",84,[[]]],[11,"extract","","Extracts the value at `index`.",84,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",84,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",84,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",84,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",84,[[["i64x8",6]],["i64x8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",84,[[["i64x8",6]],["i64x8",6]]],[11,"min","","Minimum of two vectors.",84,[[]]],[11,"max","","Maximum of two vectors.",84,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",84,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector 
elements.",84,[[]]],[11,"max_element","","Largest vector element value.",84,[[]]],[11,"min_element","","Smallest vector element value.",84,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",84,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",84,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",84,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",84,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",84,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",84,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",84,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",84,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",84,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",84,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",84,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",84,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",84,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",84,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",84,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",84,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",84,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",84,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",84,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",84,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",84,[[]]],[11,"eq","","Lane-wise equality comparison.",84,[[],["m64x8",6]]],[11,"ne","","Lane-wise inequality comparison.",84,[[],["m64x8",6]]],[11,"lt","","Lane-wise less-than comparison.",84,[[],["m64x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",84,[[],["m64x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",84,[[],["m64x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",84,[[],["m64x8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",84,[[],[["i64x8",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",84,[[],[["i64x8",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",84,[[]]],[11,"new","","Creates a new instance with each vector elements…",85,[[]]],[11,"lanes","","Returns the number of vector lanes.",85,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",85,[[]]],[11,"extract","","Extracts the value at `index`.",85,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",85,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",85,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",85,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",85,[[["u64x8",6]],["u64x8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",85,[[["u64x8",6]],["u64x8",6]]],[11,"min","","Minimum of two 
vectors.",85,[[]]],[11,"max","","Maximum of two vectors.",85,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",85,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",85,[[]]],[11,"max_element","","Largest vector element value.",85,[[]]],[11,"min_element","","Smallest vector element value.",85,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",85,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",85,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",85,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",85,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",85,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",85,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",85,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",85,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",85,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",85,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",85,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",85,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",85,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",85,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",85,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",85,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",85,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",85,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",85,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",85,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",85,[[]]],[11,"eq","","Lane-wise equality comparison.",85,[[],["m64x8",6]]],[11,"ne","","Lane-wise inequality comparison.",85,[[],["m64x8",6]]],[11,"lt","","Lane-wise less-than comparison.",85,[[],["m64x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",85,[[],["m64x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",85,[[],["m64x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",85,[[],["m64x8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",85,[[],[["lexicographicallyordered",3],["u64x8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",85,[[],[["lexicographicallyordered",3],["u64x8",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",85,[[]]],[11,"new","","Creates a new instance with each vector elements…",86,[[]]],[11,"lanes","","Returns the number of vector lanes.",86,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",86,[[]]],[11,"extract","","Extracts the value at `index`.",86,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",86,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",86,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",86,[[]]],[11,"min","","Minimum of two 
vectors.",86,[[]]],[11,"max","","Maximum of two vectors.",86,[[]]],[11,"sum","","Horizontal sum of the vector elements.",86,[[]]],[11,"product","","Horizontal product of the vector elements.",86,[[]]],[11,"max_element","","Largest vector element value.",86,[[]]],[11,"min_element","","Smallest vector element value.",86,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",86,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",86,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",86,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",86,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",86,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",86,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",86,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",86,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",86,[[]]],[18,"EPSILON","","Machine epsilon value.",86,null],[18,"MIN","","Smallest finite value.",86,null],[18,"MIN_POSITIVE","","Smallest positive normal value.",86,null],[18,"MAX","","Largest finite value.",86,null],[18,"NAN","","Not a Number (NaN).",86,null],[18,"INFINITY","","Infinity (∞).",86,null],[18,"NEG_INFINITY","","Negative infinity (-∞).",86,null],[18,"PI","","Archimedes\' constant (π)",86,null],[18,"FRAC_PI_2","","π/2",86,null],[18,"FRAC_PI_3","","π/3",86,null],[18,"FRAC_PI_4","","π/4",86,null],[18,"FRAC_PI_6","","π/6",86,null],[18,"FRAC_PI_8","","π/8",86,null],[18,"FRAC_1_PI","","1/π",86,null],[18,"FRAC_2_PI","","2/π",86,null],[18,"FRAC_2_SQRT_PI","","2/sqrt(π)",86,null],[18,"SQRT_2","","sqrt(2)",86,null],[18,"FRAC_1_SQRT_2","","1/sqrt(2)",86,null],[18,"E","","Euler\'s number (e)",86,null],[18,"LOG2_E","","log2(e)",86,null],[18,"LOG10_E","","log10(e)",86,null],[18,"LN_2","","ln(2)",86,null],[18,"LN_10","","ln(10)",86,null],[11,"is_nan","","",86,[[],["m64x8",6]]],[11,"is_infinite","","",86,[[],["m64x8",6]]],[11,"is_finite","","",86,[[],["m64x8",6]]],[11,"abs","","Absolute value.",86,[[]]],[11,"cos","","Cosine.",86,[[]]],[11,"cos_pi","","Cosine of `self * PI`.",86,[[]]],[11,"exp","","Returns the exponential function of `self`: `e^(self)`.",86,[[]]],[11,"ln","","Returns the natural logarithm of `self`.",86,[[]]],[11,"mul_add","","Fused multiply add: `self * y + z`",86,[[]]],[11,"mul_adde","","Fused multiply add estimate: ~= `self * y + z`",86,[[]]],[11,"powf","","Raises `self` number to the floating point power of `x`.",86,[[]]],[11,"recpre","","Reciprocal estimate: `~= 1. / self`.",86,[[]]],[11,"rsqrte","","Reciprocal square-root estimate: `~= 1. 
/ self.sqrt()`.",86,[[]]],[11,"sin","","Sine.",86,[[]]],[11,"sin_pi","","Sine of `self * PI`.",86,[[]]],[11,"sin_cos_pi","","Sine and cosine of `self * PI`.",86,[[]]],[11,"sqrt","","",86,[[]]],[11,"sqrte","","Square-root estimate.",86,[[]]],[11,"tanh","","Tanh.",86,[[]]],[11,"eq","","Lane-wise equality comparison.",86,[[],["m64x8",6]]],[11,"ne","","Lane-wise inequality comparison.",86,[[],["m64x8",6]]],[11,"lt","","Lane-wise less-than comparison.",86,[[],["m64x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",86,[[],["m64x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",86,[[],["m64x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",86,[[],["m64x8",6]]],[11,"new","","Creates a new instance with each vector elements…",87,[[]]],[11,"lanes","","Returns the number of vector lanes.",87,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",87,[[]]],[11,"extract","","Extracts the value at `index`.",87,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",87,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",87,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",87,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",87,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",87,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",87,[[]]],[11,"all","","Are `all` vector lanes `true`?",87,[[]]],[11,"any","","Is `any` vector lane `true`?",87,[[]]],[11,"none","","Are `all` vector lanes `false`?",87,[[]]],[11,"eq","","Lane-wise equality comparison.",87,[[],["m64x8",6]]],[11,"ne","","Lane-wise inequality comparison.",87,[[],["m64x8",6]]],[11,"lt","","Lane-wise less-than comparison.",87,[[],["m64x8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",87,[[],["m64x8",6]]],[11,"gt","","Lane-wise greater-than comparison.",87,[[],["m64x8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",87,[[],["m64x8",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",87,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",87,[[],[["m64x8",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",87,[[],[["m64x8",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",87,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",87,[[]]],[11,"new","","Creates a new instance with each vector elements…",88,[[]]],[11,"lanes","","Returns the number of vector lanes.",88,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",88,[[]]],[11,"extract","","Extracts the value at `index`.",88,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",88,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",88,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",88,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",88,[[["i128x4",6]],["i128x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",88,[[["i128x4",6]],["i128x4",6]]],[11,"min","","Minimum of two vectors.",88,[[]]],[11,"max","","Maximum of two vectors.",88,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",88,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector 
elements.",88,[[]]],[11,"max_element","","Largest vector element value.",88,[[]]],[11,"min_element","","Smallest vector element value.",88,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",88,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",88,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",88,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",88,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",88,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",88,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",88,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",88,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",88,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",88,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",88,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",88,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",88,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",88,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",88,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",88,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",88,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",88,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",88,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",88,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",88,[[]]],[11,"eq","","Lane-wise equality comparison.",88,[[],["m128x4",6]]],[11,"ne","","Lane-wise inequality comparison.",88,[[],["m128x4",6]]],[11,"lt","","Lane-wise less-than comparison.",88,[[],["m128x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",88,[[],["m128x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",88,[[],["m128x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",88,[[],["m128x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",88,[[],[["i128x4",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",88,[[],[["i128x4",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",88,[[]]],[11,"new","","Creates a new instance with each vector elements…",89,[[]]],[11,"lanes","","Returns the number of vector lanes.",89,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",89,[[]]],[11,"extract","","Extracts the value at `index`.",89,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",89,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",89,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",89,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",89,[[["u128x4",6]],["u128x4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",89,[[["u128x4",6]],["u128x4",6]]],[11,"min","","Minimum of two 
vectors.",89,[[]]],[11,"max","","Maximum of two vectors.",89,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",89,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",89,[[]]],[11,"max_element","","Largest vector element value.",89,[[]]],[11,"min_element","","Smallest vector element value.",89,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",89,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",89,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",89,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",89,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",89,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",89,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",89,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",89,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",89,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",89,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",89,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",89,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",89,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",89,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",89,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",89,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",89,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",89,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",89,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",89,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",89,[[]]],[11,"eq","","Lane-wise equality comparison.",89,[[],["m128x4",6]]],[11,"ne","","Lane-wise inequality comparison.",89,[[],["m128x4",6]]],[11,"lt","","Lane-wise less-than comparison.",89,[[],["m128x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",89,[[],["m128x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",89,[[],["m128x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",89,[[],["m128x4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",89,[[],[["u128x4",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",89,[[],[["u128x4",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",89,[[]]],[11,"new","","Creates a new instance with each vector elements…",90,[[]]],[11,"lanes","","Returns the number of vector lanes.",90,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",90,[[]]],[11,"extract","","Extracts the value at `index`.",90,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",90,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",90,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",90,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector 
elements.",90,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",90,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",90,[[]]],[11,"all","","Are `all` vector lanes `true`?",90,[[]]],[11,"any","","Is `any` vector lane `true`?",90,[[]]],[11,"none","","Are `all` vector lanes `false`?",90,[[]]],[11,"eq","","Lane-wise equality comparison.",90,[[],["m128x4",6]]],[11,"ne","","Lane-wise inequality comparison.",90,[[],["m128x4",6]]],[11,"lt","","Lane-wise less-than comparison.",90,[[],["m128x4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",90,[[],["m128x4",6]]],[11,"gt","","Lane-wise greater-than comparison.",90,[[],["m128x4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",90,[[],["m128x4",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",90,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",90,[[],[["m128x4",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",90,[[],[["m128x4",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",90,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",90,[[]]],[11,"new","","Creates a new instance with each vector elements…",48,[[]]],[11,"lanes","","Returns the number of vector lanes.",48,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",48,[[]]],[11,"extract","","Extracts the value at `index`.",48,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",48,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",48,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",48,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",48,[[["isizex2",6]],["isizex2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",48,[[["isizex2",6]],["isizex2",6]]],[11,"min","","Minimum of two vectors.",48,[[]]],[11,"max","","Maximum of two vectors.",48,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",48,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",48,[[]]],[11,"max_element","","Largest vector element value.",48,[[]]],[11,"min_element","","Smallest vector element value.",48,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",48,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",48,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",48,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",48,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",48,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",48,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",48,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",48,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",48,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",48,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",48,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",48,[[]]],[11,"to_le","","Converts self to little endian from the 
target\'s endianness.",48,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",48,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",48,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",48,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",48,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",48,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",48,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",48,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",48,[[]]],[11,"eq","","Lane-wise equality comparison.",48,[[],["msizex2",6]]],[11,"ne","","Lane-wise inequality comparison.",48,[[],["msizex2",6]]],[11,"lt","","Lane-wise less-than comparison.",48,[[],["msizex2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",48,[[],["msizex2",6]]],[11,"gt","","Lane-wise greater-than comparison.",48,[[],["msizex2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",48,[[],["msizex2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",48,[[],[["isizex2",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",48,[[],[["isizex2",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",48,[[]]],[11,"new","","Creates a new instance with each vector elements…",49,[[]]],[11,"lanes","","Returns the number of vector lanes.",49,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",49,[[]]],[11,"extract","","Extracts the value at `index`.",49,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",49,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",49,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",49,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",49,[[["usizex2",6]],["usizex2",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",49,[[["usizex2",6]],["usizex2",6]]],[11,"min","","Minimum of two vectors.",49,[[]]],[11,"max","","Maximum of two vectors.",49,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",49,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",49,[[]]],[11,"max_element","","Largest vector element value.",49,[[]]],[11,"min_element","","Smallest vector element value.",49,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",49,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",49,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",49,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",49,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",49,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",49,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",49,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",49,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",49,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the 
`slice`.",49,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",49,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",49,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",49,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",49,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",49,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",49,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",49,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",49,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",49,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",49,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",49,[[]]],[11,"eq","","Lane-wise equality comparison.",49,[[],["msizex2",6]]],[11,"ne","","Lane-wise inequality comparison.",49,[[],["msizex2",6]]],[11,"lt","","Lane-wise less-than comparison.",49,[[],["msizex2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",49,[[],["msizex2",6]]],[11,"gt","","Lane-wise greater-than comparison.",49,[[],["msizex2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",49,[[],["msizex2",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",49,[[],[["usizex2",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",49,[[],[["usizex2",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",49,[[]]],[11,"new","","Creates a new instance with each vector elements…",50,[[]]],[11,"lanes","","Returns the number of vector lanes.",50,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",50,[[]]],[11,"extract","","Extracts the value at `index`.",50,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",50,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",50,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",50,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",50,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",50,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",50,[[]]],[11,"all","","Are `all` vector lanes `true`?",50,[[]]],[11,"any","","Is `any` vector lane `true`?",50,[[]]],[11,"none","","Are `all` vector lanes `false`?",50,[[]]],[11,"eq","","Lane-wise equality comparison.",50,[[],["msizex2",6]]],[11,"ne","","Lane-wise inequality comparison.",50,[[],["msizex2",6]]],[11,"lt","","Lane-wise less-than comparison.",50,[[],["msizex2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",50,[[],["msizex2",6]]],[11,"gt","","Lane-wise greater-than comparison.",50,[[],["msizex2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",50,[[],["msizex2",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",50,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",50,[[],[["msizex2",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",50,[[],[["msizex2",6],["lexicographicallyordered",3]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",50,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector 
lane.",50,[[]]],[11,"new","","Creates a new instance with each vector elements…",71,[[]]],[11,"lanes","","Returns the number of vector lanes.",71,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",71,[[]]],[11,"extract","","Extracts the value at `index`.",71,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",71,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",71,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",71,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",71,[[["isizex4",6]],["isizex4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",71,[[["isizex4",6]],["isizex4",6]]],[11,"min","","Minimum of two vectors.",71,[[]]],[11,"max","","Maximum of two vectors.",71,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",71,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",71,[[]]],[11,"max_element","","Largest vector element value.",71,[[]]],[11,"min_element","","Smallest vector element value.",71,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",71,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",71,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",71,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",71,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",71,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",71,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",71,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",71,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",71,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",71,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",71,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",71,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",71,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",71,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",71,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",71,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",71,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",71,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",71,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",71,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",71,[[]]],[11,"eq","","Lane-wise equality comparison.",71,[[],["msizex4",6]]],[11,"ne","","Lane-wise inequality comparison.",71,[[],["msizex4",6]]],[11,"lt","","Lane-wise less-than comparison.",71,[[],["msizex4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",71,[[],["msizex4",6]]],[11,"gt","","Lane-wise greater-than comparison.",71,[[],["msizex4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",71,[[],["msizex4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements 
`PartialOrd`.",71,[[],[["isizex4",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",71,[[],[["isizex4",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",71,[[]]],[11,"new","","Creates a new instance with each vector elements…",72,[[]]],[11,"lanes","","Returns the number of vector lanes.",72,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",72,[[]]],[11,"extract","","Extracts the value at `index`.",72,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",72,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",72,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",72,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",72,[[["usizex4",6]],["usizex4",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",72,[[["usizex4",6]],["usizex4",6]]],[11,"min","","Minimum of two vectors.",72,[[]]],[11,"max","","Maximum of two vectors.",72,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",72,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",72,[[]]],[11,"max_element","","Largest vector element value.",72,[[]]],[11,"min_element","","Smallest vector element value.",72,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",72,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",72,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",72,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",72,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",72,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",72,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",72,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",72,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",72,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",72,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",72,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",72,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",72,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",72,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",72,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",72,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",72,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",72,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",72,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",72,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",72,[[]]],[11,"eq","","Lane-wise equality comparison.",72,[[],["msizex4",6]]],[11,"ne","","Lane-wise inequality comparison.",72,[[],["msizex4",6]]],[11,"lt","","Lane-wise less-than comparison.",72,[[],["msizex4",6]]],[11,"le","","Lane-wise less-than-or-equals 
comparison.",72,[[],["msizex4",6]]],[11,"gt","","Lane-wise greater-than comparison.",72,[[],["msizex4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",72,[[],["msizex4",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",72,[[],[["lexicographicallyordered",3],["usizex4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",72,[[],[["lexicographicallyordered",3],["usizex4",6]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",72,[[]]],[11,"new","","Creates a new instance with each vector elements…",73,[[]]],[11,"lanes","","Returns the number of vector lanes.",73,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",73,[[]]],[11,"extract","","Extracts the value at `index`.",73,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",73,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",73,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",73,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",73,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",73,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",73,[[]]],[11,"all","","Are `all` vector lanes `true`?",73,[[]]],[11,"any","","Is `any` vector lane `true`?",73,[[]]],[11,"none","","Are `all` vector lanes `false`?",73,[[]]],[11,"eq","","Lane-wise equality comparison.",73,[[],["msizex4",6]]],[11,"ne","","Lane-wise inequality comparison.",73,[[],["msizex4",6]]],[11,"lt","","Lane-wise less-than comparison.",73,[[],["msizex4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",73,[[],["msizex4",6]]],[11,"gt","","Lane-wise greater-than comparison.",73,[[],["msizex4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",73,[[],["msizex4",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",73,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",73,[[],[["lexicographicallyordered",3],["msizex4",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",73,[[],[["lexicographicallyordered",3],["msizex4",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",73,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",73,[[]]],[11,"new","","Creates a new instance with each vector elements…",91,[[]]],[11,"lanes","","Returns the number of vector lanes.",91,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",91,[[]]],[11,"extract","","Extracts the value at `index`.",91,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",91,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",91,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",91,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",91,[[["isizex8",6]],["isizex8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",91,[[["isizex8",6]],["isizex8",6]]],[11,"min","","Minimum of two vectors.",91,[[]]],[11,"max","","Maximum of two vectors.",91,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",91,[[]]],[11,"wrapping_product","","Horizontal wrapping product of the vector elements.",91,[[]]],[11,"max_element","","Largest vector element value.",91,[[]]],[11,"min_element","","Smallest vector element value.",91,[[]]],[11,"and","","Lane-wise bitwise `and` of the 
vector elements.",91,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",91,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",91,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",91,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",91,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",91,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",91,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",91,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",91,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",91,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",91,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",91,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",91,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",91,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",91,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",91,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",91,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",91,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",91,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",91,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",91,[[]]],[11,"eq","","Lane-wise equality comparison.",91,[[],["msizex8",6]]],[11,"ne","","Lane-wise inequality comparison.",91,[[],["msizex8",6]]],[11,"lt","","Lane-wise less-than comparison.",91,[[],["msizex8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",91,[[],["msizex8",6]]],[11,"gt","","Lane-wise greater-than comparison.",91,[[],["msizex8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",91,[[],["msizex8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",91,[[],[["isizex8",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",91,[[],[["isizex8",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",91,[[]]],[11,"new","","Creates a new instance with each vector elements…",92,[[]]],[11,"lanes","","Returns the number of vector lanes.",92,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",92,[[]]],[11,"extract","","Extracts the value at `index`.",92,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",92,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",92,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",92,[[]]],[11,"rotate_left","","Shifts the bits of each lane to the left by the specified…",92,[[["usizex8",6]],["usizex8",6]]],[11,"rotate_right","","Shifts the bits of each lane to the right by the specified…",92,[[["usizex8",6]],["usizex8",6]]],[11,"min","","Minimum of two vectors.",92,[[]]],[11,"max","","Maximum of two vectors.",92,[[]]],[11,"wrapping_sum","","Horizontal wrapping sum of the vector elements.",92,[[]]],[11,"wrapping_product","","Horizontal 
wrapping product of the vector elements.",92,[[]]],[11,"max_element","","Largest vector element value.",92,[[]]],[11,"min_element","","Smallest vector element value.",92,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",92,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",92,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",92,[[]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",92,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",92,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",92,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",92,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",92,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",92,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",92,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",92,[[]]],[11,"swap_bytes","","Reverses the byte order of the vector.",92,[[]]],[11,"to_le","","Converts self to little endian from the target\'s endianness.",92,[[]]],[11,"to_be","","Converts self to big endian from the target\'s endianness.",92,[[]]],[11,"from_le","","Converts a vector from little endian to the target\'s…",92,[[]]],[11,"from_be","","Converts a vector from big endian to the target\'s…",92,[[]]],[11,"count_ones","","Returns the number of ones in the binary representation of…",92,[[]]],[11,"count_zeros","","Returns the number of zeros in the binary representation…",92,[[]]],[11,"leading_zeros","","Returns the number of leading zeros in the binary…",92,[[]]],[11,"trailing_zeros","","Returns the number of trailing zeros in the binary…",92,[[]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",92,[[]]],[11,"eq","","Lane-wise equality comparison.",92,[[],["msizex8",6]]],[11,"ne","","Lane-wise inequality comparison.",92,[[],["msizex8",6]]],[11,"lt","","Lane-wise less-than comparison.",92,[[],["msizex8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",92,[[],["msizex8",6]]],[11,"gt","","Lane-wise greater-than comparison.",92,[[],["msizex8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",92,[[],["msizex8",6]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",92,[[],[["usizex8",6],["lexicographicallyordered",3]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",92,[[],[["usizex8",6],["lexicographicallyordered",3]]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",92,[[]]],[11,"new","","Creates a new instance with each vector elements…",93,[[]]],[11,"lanes","","Returns the number of vector lanes.",93,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",93,[[]]],[11,"extract","","Extracts the value at `index`.",93,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",93,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",93,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",93,[[]]],[11,"and","","Lane-wise bitwise `and` of the vector elements.",93,[[]]],[11,"or","","Lane-wise bitwise `or` of the vector elements.",93,[[]]],[11,"xor","","Lane-wise bitwise `xor` of the vector elements.",93,[[]]],[11,"all","","Are `all` 
vector lanes `true`?",93,[[]]],[11,"any","","Is `any` vector lane `true`?",93,[[]]],[11,"none","","Are `all` vector lanes `false`?",93,[[]]],[11,"eq","","Lane-wise equality comparison.",93,[[],["msizex8",6]]],[11,"ne","","Lane-wise inequality comparison.",93,[[],["msizex8",6]]],[11,"lt","","Lane-wise less-than comparison.",93,[[],["msizex8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",93,[[],["msizex8",6]]],[11,"gt","","Lane-wise greater-than comparison.",93,[[],["msizex8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",93,[[],["msizex8",6]]],[11,"select","","Selects elements of `a` and `b` using mask.",93,[[["simd",3]],["simd",3]]],[11,"partial_lex_ord","","Returns a wrapper that implements `PartialOrd`.",93,[[],[["lexicographicallyordered",3],["msizex8",6]]]],[11,"lex_ord","","Returns a wrapper that implements `Ord`.",93,[[],[["lexicographicallyordered",3],["msizex8",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",93,[[]]],[11,"bitmask","","Creates a bitmask with the MSB of each vector lane.",93,[[]]],[11,"new","","Creates a new instance with each vector elements…",94,[[]]],[11,"lanes","","Returns the number of vector lanes.",94,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",94,[[]]],[11,"null","","Constructs a new instance with each element initialized to…",94,[[]]],[11,"is_null","","Returns a mask that selects those lanes that contain…",94,[[],["msizex2",6]]],[11,"extract","","Extracts the value at `index`.",94,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",94,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",94,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",94,[[]]],[11,"eq","","Lane-wise equality comparison.",94,[[],["msizex2",6]]],[11,"ne","","Lane-wise inequality comparison.",94,[[],["msizex2",6]]],[11,"lt","","Lane-wise less-than comparison.",94,[[],["msizex2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",94,[[],["msizex2",6]]],[11,"gt","","Lane-wise greater-than comparison.",94,[[],["msizex2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",94,[[],["msizex2",6]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",94,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",94,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",94,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",94,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",94,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",94,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",94,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",94,[[]]],[11,"offset","","Calculates the offset from a pointer.",94,[[["isizex2",6]]]],[11,"wrapping_offset","","Calculates the offset from a pointer using wrapping…",94,[[["isizex2",6]]]],[11,"offset_from","","Calculates the distance between two pointers.",94,[[],["isizex2",6]]],[11,"wrapping_offset_from","","Calculates the distance between two pointers.",94,[[],["isizex2",6]]],[11,"add","","Calculates the offset from a pointer (convenience for…",94,[[["usizex2",6]]]],[11,"sub","","Calculates the offset from a pointer (convenience 
for…",94,[[["usizex2",6]]]],[11,"wrapping_add","","Calculates the offset from a pointer using wrapping…",94,[[["usizex2",6]]]],[11,"wrapping_sub","","Calculates the offset from a pointer using wrapping…",94,[[["usizex2",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",94,[[]]],[11,"read","","Reads selected vector elements from memory.",94,[[["simd",3],["simd",3]],["simd",3]]],[11,"new","","Creates a new instance with each vector elements…",95,[[]]],[11,"lanes","","Returns the number of vector lanes.",95,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",95,[[]]],[11,"null","","Constructs a new instance with each element initialized to…",95,[[]]],[11,"is_null","","Returns a mask that selects those lanes that contain…",95,[[],["msizex2",6]]],[11,"extract","","Extracts the value at `index`.",95,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",95,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",95,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",95,[[]]],[11,"eq","","Lane-wise equality comparison.",95,[[],["msizex2",6]]],[11,"ne","","Lane-wise inequality comparison.",95,[[],["msizex2",6]]],[11,"lt","","Lane-wise less-than comparison.",95,[[],["msizex2",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",95,[[],["msizex2",6]]],[11,"gt","","Lane-wise greater-than comparison.",95,[[],["msizex2",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",95,[[],["msizex2",6]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",95,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",95,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",95,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",95,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",95,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",95,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",95,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",95,[[]]],[11,"offset","","Calculates the offset from a pointer.",95,[[["isizex2",6]]]],[11,"wrapping_offset","","Calculates the offset from a pointer using wrapping…",95,[[["isizex2",6]]]],[11,"offset_from","","Calculates the distance between two pointers.",95,[[],["isizex2",6]]],[11,"wrapping_offset_from","","Calculates the distance between two pointers.",95,[[],["isizex2",6]]],[11,"add","","Calculates the offset from a pointer (convenience for…",95,[[["usizex2",6]]]],[11,"sub","","Calculates the offset from a pointer (convenience for…",95,[[["usizex2",6]]]],[11,"wrapping_add","","Calculates the offset from a pointer using wrapping…",95,[[["usizex2",6]]]],[11,"wrapping_sub","","Calculates the offset from a pointer using wrapping…",95,[[["usizex2",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",95,[[]]],[11,"read","","Reads selected vector elements from memory.",95,[[["simd",3],["simd",3]],["simd",3]]],[11,"write","","Writes selected vector elements to memory.",95,[[["simd",3],["simd",3]]]],[11,"new","","Creates a new instance with each vector elements…",96,[[]]],[11,"lanes","","Returns the number of vector lanes.",96,[[]]],[11,"splat","","Constructs a new instance 
with each element initialized to…",96,[[]]],[11,"null","","Constructs a new instance with each element initialized to…",96,[[]]],[11,"is_null","","Returns a mask that selects those lanes that contain…",96,[[],["msizex4",6]]],[11,"extract","","Extracts the value at `index`.",96,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",96,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",96,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",96,[[]]],[11,"eq","","Lane-wise equality comparison.",96,[[],["msizex4",6]]],[11,"ne","","Lane-wise inequality comparison.",96,[[],["msizex4",6]]],[11,"lt","","Lane-wise less-than comparison.",96,[[],["msizex4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",96,[[],["msizex4",6]]],[11,"gt","","Lane-wise greater-than comparison.",96,[[],["msizex4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",96,[[],["msizex4",6]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",96,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",96,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",96,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",96,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",96,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",96,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",96,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",96,[[]]],[11,"offset","","Calculates the offset from a pointer.",96,[[["isizex4",6]]]],[11,"wrapping_offset","","Calculates the offset from a pointer using wrapping…",96,[[["isizex4",6]]]],[11,"offset_from","","Calculates the distance between two pointers.",96,[[],["isizex4",6]]],[11,"wrapping_offset_from","","Calculates the distance between two pointers.",96,[[],["isizex4",6]]],[11,"add","","Calculates the offset from a pointer (convenience for…",96,[[["usizex4",6]]]],[11,"sub","","Calculates the offset from a pointer (convenience for…",96,[[["usizex4",6]]]],[11,"wrapping_add","","Calculates the offset from a pointer using wrapping…",96,[[["usizex4",6]]]],[11,"wrapping_sub","","Calculates the offset from a pointer using wrapping…",96,[[["usizex4",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",96,[[]]],[11,"read","","Reads selected vector elements from memory.",96,[[["simd",3],["simd",3]],["simd",3]]],[11,"new","","Creates a new instance with each vector elements…",97,[[]]],[11,"lanes","","Returns the number of vector lanes.",97,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",97,[[]]],[11,"null","","Constructs a new instance with each element initialized to…",97,[[]]],[11,"is_null","","Returns a mask that selects those lanes that contain…",97,[[],["msizex4",6]]],[11,"extract","","Extracts the value at `index`.",97,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",97,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",97,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",97,[[]]],[11,"eq","","Lane-wise equality comparison.",97,[[],["msizex4",6]]],[11,"ne","","Lane-wise inequality 
comparison.",97,[[],["msizex4",6]]],[11,"lt","","Lane-wise less-than comparison.",97,[[],["msizex4",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",97,[[],["msizex4",6]]],[11,"gt","","Lane-wise greater-than comparison.",97,[[],["msizex4",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",97,[[],["msizex4",6]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",97,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",97,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",97,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",97,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",97,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",97,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",97,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",97,[[]]],[11,"offset","","Calculates the offset from a pointer.",97,[[["isizex4",6]]]],[11,"wrapping_offset","","Calculates the offset from a pointer using wrapping…",97,[[["isizex4",6]]]],[11,"offset_from","","Calculates the distance between two pointers.",97,[[],["isizex4",6]]],[11,"wrapping_offset_from","","Calculates the distance between two pointers.",97,[[],["isizex4",6]]],[11,"add","","Calculates the offset from a pointer (convenience for…",97,[[["usizex4",6]]]],[11,"sub","","Calculates the offset from a pointer (convenience for…",97,[[["usizex4",6]]]],[11,"wrapping_add","","Calculates the offset from a pointer using wrapping…",97,[[["usizex4",6]]]],[11,"wrapping_sub","","Calculates the offset from a pointer using wrapping…",97,[[["usizex4",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",97,[[]]],[11,"read","","Reads selected vector elements from memory.",97,[[["simd",3],["simd",3]],["simd",3]]],[11,"write","","Writes selected vector elements to memory.",97,[[["simd",3],["simd",3]]]],[11,"new","","Creates a new instance with each vector elements…",98,[[]]],[11,"lanes","","Returns the number of vector lanes.",98,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",98,[[]]],[11,"null","","Constructs a new instance with each element initialized to…",98,[[]]],[11,"is_null","","Returns a mask that selects those lanes that contain…",98,[[],["msizex8",6]]],[11,"extract","","Extracts the value at `index`.",98,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",98,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",98,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",98,[[]]],[11,"eq","","Lane-wise equality comparison.",98,[[],["msizex8",6]]],[11,"ne","","Lane-wise inequality comparison.",98,[[],["msizex8",6]]],[11,"lt","","Lane-wise less-than comparison.",98,[[],["msizex8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",98,[[],["msizex8",6]]],[11,"gt","","Lane-wise greater-than comparison.",98,[[],["msizex8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",98,[[],["msizex8",6]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",98,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",98,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates 
a new vector with the values of the `slice`.",98,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",98,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",98,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",98,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",98,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",98,[[]]],[11,"offset","","Calculates the offset from a pointer.",98,[[["isizex8",6]]]],[11,"wrapping_offset","","Calculates the offset from a pointer using wrapping…",98,[[["isizex8",6]]]],[11,"offset_from","","Calculates the distance between two pointers.",98,[[],["isizex8",6]]],[11,"wrapping_offset_from","","Calculates the distance between two pointers.",98,[[],["isizex8",6]]],[11,"add","","Calculates the offset from a pointer (convenience for…",98,[[["usizex8",6]]]],[11,"sub","","Calculates the offset from a pointer (convenience for…",98,[[["usizex8",6]]]],[11,"wrapping_add","","Calculates the offset from a pointer using wrapping…",98,[[["usizex8",6]]]],[11,"wrapping_sub","","Calculates the offset from a pointer using wrapping…",98,[[["usizex8",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",98,[[]]],[11,"read","","Reads selected vector elements from memory.",98,[[["simd",3],["simd",3]],["simd",3]]],[11,"new","","Creates a new instance with each vector elements…",99,[[]]],[11,"lanes","","Returns the number of vector lanes.",99,[[]]],[11,"splat","","Constructs a new instance with each element initialized to…",99,[[]]],[11,"null","","Constructs a new instance with each element initialized to…",99,[[]]],[11,"is_null","","Returns a mask that selects those lanes that contain…",99,[[],["msizex8",6]]],[11,"extract","","Extracts the value at `index`.",99,[[]]],[11,"extract_unchecked","","Extracts the value at `index`.",99,[[]]],[11,"replace","","Returns a new vector where the value at `index` is…",99,[[]]],[11,"replace_unchecked","","Returns a new vector where the value at `index` is…",99,[[]]],[11,"eq","","Lane-wise equality comparison.",99,[[],["msizex8",6]]],[11,"ne","","Lane-wise inequality comparison.",99,[[],["msizex8",6]]],[11,"lt","","Lane-wise less-than comparison.",99,[[],["msizex8",6]]],[11,"le","","Lane-wise less-than-or-equals comparison.",99,[[],["msizex8",6]]],[11,"gt","","Lane-wise greater-than comparison.",99,[[],["msizex8",6]]],[11,"ge","","Lane-wise greater-than-or-equals comparison.",99,[[],["msizex8",6]]],[11,"from_slice_aligned","","Instantiates a new vector with the values of the `slice`.",99,[[]]],[11,"from_slice_unaligned","","Instantiates a new vector with the values of the `slice`.",99,[[]]],[11,"from_slice_aligned_unchecked","","Instantiates a new vector with the values of the `slice`.",99,[[]]],[11,"from_slice_unaligned_unchecked","","Instantiates a new vector with the values of the `slice`.",99,[[]]],[11,"write_to_slice_aligned","","Writes the values of the vector to the `slice`.",99,[[]]],[11,"write_to_slice_unaligned","","Writes the values of the vector to the `slice`.",99,[[]]],[11,"write_to_slice_aligned_unchecked","","Writes the values of the vector to the `slice`.",99,[[]]],[11,"write_to_slice_unaligned_unchecked","","Writes the values of the vector to the `slice`.",99,[[]]],[11,"offset","","Calculates the offset from a pointer.",99,[[["isizex8",6]]]],[11,"wrapping_offset","","Calculates the 
offset from a pointer using wrapping…",99,[[["isizex8",6]]]],[11,"offset_from","","Calculates the distance between two pointers.",99,[[],["isizex8",6]]],[11,"wrapping_offset_from","","Calculates the distance between two pointers.",99,[[],["isizex8",6]]],[11,"add","","Calculates the offset from a pointer (convenience for…",99,[[["usizex8",6]]]],[11,"sub","","Calculates the offset from a pointer (convenience for…",99,[[["usizex8",6]]]],[11,"wrapping_add","","Calculates the offset from a pointer using wrapping…",99,[[["usizex8",6]]]],[11,"wrapping_sub","","Calculates the offset from a pointer using wrapping…",99,[[["usizex8",6]]]],[11,"shuffle1_dyn","","Shuffle vector elements according to `indices`.",99,[[]]],[11,"read","","Reads selected vector elements from memory.",99,[[["simd",3],["simd",3]],["simd",3]]],[11,"write","","Writes selected vector elements to memory.",99,[[["simd",3],["simd",3]]]]],"p":[[3,"m8"],[3,"m16"],[3,"m32"],[3,"m64"],[3,"m128"],[3,"msize"],[3,"Simd"],[8,"SimdVector"],[8,"SimdArray"],[8,"Mask"],[8,"FromCast"],[8,"Cast"],[8,"FromBits"],[8,"IntoBits"],[3,"LexicographicallyOrdered"],[6,"i8x2"],[6,"u8x2"],[6,"m8x2"],[6,"i8x4"],[6,"u8x4"],[6,"m8x4"],[6,"i16x2"],[6,"u16x2"],[6,"m16x2"],[6,"i8x8"],[6,"u8x8"],[6,"m8x8"],[6,"i16x4"],[6,"u16x4"],[6,"m16x4"],[6,"i32x2"],[6,"u32x2"],[6,"f32x2"],[6,"m32x2"],[6,"i8x16"],[6,"u8x16"],[6,"m8x16"],[6,"i16x8"],[6,"u16x8"],[6,"m16x8"],[6,"i32x4"],[6,"u32x4"],[6,"f32x4"],[6,"m32x4"],[6,"i64x2"],[6,"u64x2"],[6,"f64x2"],[6,"m64x2"],[6,"isizex2"],[6,"usizex2"],[6,"msizex2"],[6,"i128x1"],[6,"u128x1"],[6,"m128x1"],[6,"i8x32"],[6,"u8x32"],[6,"m8x32"],[6,"i16x16"],[6,"u16x16"],[6,"m16x16"],[6,"i32x8"],[6,"u32x8"],[6,"f32x8"],[6,"m32x8"],[6,"i64x4"],[6,"u64x4"],[6,"f64x4"],[6,"m64x4"],[6,"i128x2"],[6,"u128x2"],[6,"m128x2"],[6,"isizex4"],[6,"usizex4"],[6,"msizex4"],[6,"i8x64"],[6,"u8x64"],[6,"m8x64"],[6,"i16x32"],[6,"u16x32"],[6,"m16x32"],[6,"i32x16"],[6,"u32x16"],[6,"f32x16"],[6,"m32x16"],[6,"i64x8"],[6,"u64x8"],[6,"f64x8"],[6,"m64x8"],[6,"i128x4"],[6,"u128x4"],[6,"m128x4"],[6,"isizex8"],[6,"usizex8"],[6,"msizex8"],[6,"cptrx2"],[6,"mptrx2"],[6,"cptrx4"],[6,"mptrx4"],[6,"cptrx8"],[6,"mptrx8"]]}\ +}'); +addSearchOptions(searchIndex);initSearch(searchIndex); \ No newline at end of file diff --git a/settings.css b/settings.css new file mode 100644 index 000000000..6ce766586 --- /dev/null +++ b/settings.css @@ -0,0 +1 @@ +.setting-line{padding:5px;position:relative;}.setting-line>div{max-width:calc(100% - 74px);display:inline-block;vertical-align:top;font-size:17px;padding-top:2px;}.setting-line>.title{font-size:19px;width:100%;max-width:none;border-bottom:1px solid;}.toggle{position:relative;display:inline-block;width:45px;height:27px;margin-right:20px;}.toggle input{display:none;}.slider{position:absolute;cursor:pointer;top:0;left:0;right:0;bottom:0;background-color:#ccc;-webkit-transition:.3s;transition:.3s;}.slider:before{position:absolute;content:"";height:19px;width:19px;left:4px;bottom:4px;background-color:white;-webkit-transition:.3s;transition:.3s;}input:checked+.slider{background-color:#2196F3;}input:focus+.slider{box-shadow:0 0 1px #2196F3;}input:checked+.slider:before{-webkit-transform:translateX(19px);-ms-transform:translateX(19px);transform:translateX(19px);}.setting-line>.sub-settings{padding-left:42px;width:100%;display:block;} \ No newline at end of file diff --git a/settings.html b/settings.html new file mode 100644 index 000000000..4605d1e92 --- /dev/null +++ b/settings.html @@ -0,0 +1,2 @@ +Rustdoc settings

Rustdoc settings

Auto-hide item declarations
Auto-hide structs declaration
Auto-hide enums declaration
Auto-hide unions declaration
Auto-hide traits declaration
Auto-hide macros declaration
+
Auto-hide item attributes.
Auto-hide item methods' documentation
Auto-hide trait implementations documentation
Directly go to item in search if there is only one result
Show line numbers on code examples
Disable keyboard shortcuts
\ No newline at end of file diff --git a/settings.js b/settings.js new file mode 100644 index 000000000..993030905 --- /dev/null +++ b/settings.js @@ -0,0 +1 @@ +(function(){function changeSetting(settingName,isEnabled){updateLocalStorage('rustdoc-'+settingName,isEnabled)}function getSettingValue(settingName){return getCurrentValue('rustdoc-'+settingName)}function setEvents(){var elems=document.getElementsByClassName("slider");if(!elems||elems.length===0){return}for(var i=0;i"){sidebar.style.left="";this.style.left="";child.innerText="<";updateLocalStorage("rustdoc-source-sidebar-show","true")}else{sidebar.style.left="-300px";this.style.left="0";child.innerText=">";updateLocalStorage("rustdoc-source-sidebar-show","false")}}function createSidebarToggle(){var sidebarToggle=document.createElement("div");sidebarToggle.id="sidebar-toggle";sidebarToggle.onclick=toggleSidebar;var inner1=document.createElement("div");inner1.style.position="relative";var inner2=document.createElement("div");inner2.style.paddingTop="3px";if(getCurrentValue("rustdoc-source-sidebar-show")==="true"){inner2.innerText="<"}else{inner2.innerText=">";sidebarToggle.style.left="0"}inner1.appendChild(inner2);sidebarToggle.appendChild(inner1);return sidebarToggle}function createSourceSidebar(){if(window.rootPath.endsWith("/")===false){window.rootPath+="/"}var main=document.getElementById("main");var sidebarToggle=createSidebarToggle();main.insertBefore(sidebarToggle,main.firstChild);var sidebar=document.createElement("div");sidebar.id="source-sidebar";if(getCurrentValue("rustdoc-source-sidebar-show")!=="true"){sidebar.style.left="-300px"}var currentFile=getCurrentFilePath();var hasFoundFile=false;var title=document.createElement("div");title.className="title";title.innerText="Files";sidebar.appendChild(title);Object.keys(sourcesIndex).forEach(function(key){sourcesIndex[key].name=key;hasFoundFile=createDirEntry(sourcesIndex[key],sidebar,"",currentFile,hasFoundFile)});main.insertBefore(sidebar,main.firstChild)} \ No newline at end of file diff --git a/src/cfg_if/lib.rs.html b/src/cfg_if/lib.rs.html new file mode 100644 index 000000000..6a712eb27 --- /dev/null +++ b/src/cfg_if/lib.rs.html @@ -0,0 +1,355 @@ +lib.rs.html -- source
+//! A macro for defining `#[cfg]` if-else statements.
+//!
+//! The macro provided by this crate, `cfg_if`, is similar to a chain of `if/elif` C
+//! preprocessor directives in that it allows defining a cascade of `#[cfg]` cases,
+//! emitting the implementation that matches first.
+//!
+//! This allows you to conveniently provide a long list of `#[cfg]`'d blocks of code
+//! without having to rewrite each clause multiple times.
+//!
+//! # Example
+//!
+//! ```
+//! cfg_if::cfg_if! {
+//!     if #[cfg(unix)] {
+//!         fn foo() { /* unix specific functionality */ }
+//!     } else if #[cfg(target_pointer_width = "32")] {
+//!         fn foo() { /* non-unix, 32-bit functionality */ }
+//!     } else {
+//!         fn foo() { /* fallback implementation */ }
+//!     }
+//! }
+//!
+//! # fn main() {}
+//! ```
+
+#![no_std]
+#![doc(html_root_url = "https://docs.rs/cfg-if")]
+#![deny(missing_docs)]
+#![cfg_attr(test, deny(warnings))]
+
+/// The main macro provided by this crate. See crate documentation for more
+/// information.
+#[macro_export]
+macro_rules! cfg_if {
+    // match if/else chains with a final `else`
+    ($(
+        if #[cfg($($meta:meta),*)] { $($tokens:tt)* }
+    ) else * else {
+        $($tokens2:tt)*
+    }) => {
+        $crate::cfg_if! {
+            @__items
+            () ;
+            $( ( ($($meta),*) ($($tokens)*) ), )*
+            ( () ($($tokens2)*) ),
+        }
+    };
+
+    // match if/else chains lacking a final `else`
+    (
+        if #[cfg($($i_met:meta),*)] { $($i_tokens:tt)* }
+        $(
+            else if #[cfg($($e_met:meta),*)] { $($e_tokens:tt)* }
+        )*
+    ) => {
+        $crate::cfg_if! {
+            @__items
+            () ;
+            ( ($($i_met),*) ($($i_tokens)*) ),
+            $( ( ($($e_met),*) ($($e_tokens)*) ), )*
+            ( () () ),
+        }
+    };
+
+    // Internal and recursive macro to emit all the items
+    //
+    // Collects all the negated cfgs in a list at the beginning and after the
+    // semicolon is all the remaining items
+    (@__items ($($not:meta,)*) ; ) => {};
+    (@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($tokens:tt)*) ), $($rest:tt)*) => {
+        // Emit all items within one block, applying an appropriate #[cfg]. The
+        // #[cfg] will require all `$m` matchers specified and must also negate
+        // all previous matchers.
+        #[cfg(all($($m,)* not(any($($not),*))))] $crate::cfg_if! { @__identity $($tokens)* }
+
+        // Recurse to emit all other items in `$rest`, and when we do so, add all
+        // our `$m` matchers to the list of `$not` matchers, since future emissions
+        // will have to negate everything we just matched as well.
+        $crate::cfg_if! { @__items ($($not,)* $($m,)*) ; $($rest)* }
+    };
+
+    // Internal identity macro that makes the expansion work out right for
+    // different match types, given how macro matching and expansion interact.
+    (@__identity $($tokens:tt)*) => {
+        $($tokens)*
+    };
+}
+
+#[cfg(test)]
+mod tests {
+    cfg_if! {
+        if #[cfg(test)] {
+            use core::option::Option as Option2;
+            fn works1() -> Option2<u32> { Some(1) }
+        } else {
+            fn works1() -> Option<u32> { None }
+        }
+    }
+
+    cfg_if! {
+        if #[cfg(foo)] {
+            fn works2() -> bool { false }
+        } else if #[cfg(test)] {
+            fn works2() -> bool { true }
+        } else {
+            fn works2() -> bool { false }
+        }
+    }
+
+    cfg_if! {
+        if #[cfg(foo)] {
+            fn works3() -> bool { false }
+        } else {
+            fn works3() -> bool { true }
+        }
+    }
+
+    cfg_if! {
+        if #[cfg(test)] {
+            use core::option::Option as Option3;
+            fn works4() -> Option3<u32> { Some(1) }
+        }
+    }
+
+    cfg_if! {
+        if #[cfg(foo)] {
+            fn works5() -> bool { false }
+        } else if #[cfg(test)] {
+            fn works5() -> bool { true }
+        }
+    }
+
+    #[test]
+    fn it_works() {
+        assert!(works1().is_some());
+        assert!(works2());
+        assert!(works3());
+        assert!(works4().is_some());
+        assert!(works5());
+    }
+
+    #[test]
+    #[allow(clippy::assertions_on_constants)]
+    fn test_usage_within_a_function() {
+        cfg_if! {if #[cfg(debug_assertions)] {
+            // we want to put more than one thing here to make sure that they
+            // all get configured properly.
+            assert!(cfg!(debug_assertions));
+            assert_eq!(4, 2+2);
+        } else {
+            assert!(works1().is_some());
+            assert_eq!(10, 5+5);
+        }}
+    }
+
+    trait Trait {
+        fn blah(&self);
+    }
+
+    #[allow(dead_code)]
+    struct Struct;
+
+    impl Trait for Struct {
+        cfg_if! {
+            if #[cfg(feature = "blah")] {
+                fn blah(&self) {
+                    unimplemented!();
+                }
+            } else {
+                fn blah(&self) {
+                    unimplemented!();
+                }
+            }
+        }
+    }
+}
+
+
\ No newline at end of file diff --git a/src/libm/lib.rs.html b/src/libm/lib.rs.html new file mode 100644 index 000000000..8cb0d5cf0 --- /dev/null +++ b/src/libm/lib.rs.html @@ -0,0 +1,1287 @@ +lib.rs.html -- source
+//! libm in pure Rust
+//!
+//! # Usage
+//!
+//! You can use this crate in two ways:
+//!
+//! - By directly using its free functions, e.g. `libm::powf`.
+//!
+//! - By importing the `F32Ext` and/or `F64Ext` extension traits to add methods like `powf` to the
+//! `f32` and `f64` types. Then you'll be able to invoke math functions as methods, e.g. `x.sqrt()`.
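+//!
+//! For example, both styles in action:
+//!
+//! ```
+//! use libm::F32Ext;
+//!
+//! let x = 4.0_f32;
+//! assert_eq!(libm::sqrtf(x), 2.0); // free function
+//! assert_eq!(x.sqrt(), 2.0);       // extension-trait method
+//! ```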
+
+#![deny(warnings)]
+#![no_std]
+#![cfg_attr(
+    all(target_arch = "wasm32", not(feature = "stable")),
+    feature(core_intrinsics)
+)]
+
+mod math;
+
+use core::{f32, f64};
+
+pub use self::math::*;
+
+/// Approximate equality with 1 ULP of tolerance
+#[doc(hidden)]
+#[inline]
+pub fn _eqf(a: f32, b: f32) -> Result<(), u32> {
+    if a.is_nan() && b.is_nan() {
+        Ok(())
+    } else {
+        let err = (a.to_bits() as i32).wrapping_sub(b.to_bits() as i32).abs();
+
+        if err <= 1 {
+            Ok(())
+        } else {
+            Err(err as u32)
+        }
+    }
+}
+
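+/// Approximate equality with 1 ULP of tolerance (f64)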
+#[doc(hidden)]
+#[inline]
+pub fn _eq(a: f64, b: f64) -> Result<(), u64> {
+    if a.is_nan() && b.is_nan() {
+        Ok(())
+    } else {
+        let err = (a.to_bits() as i64).wrapping_sub(b.to_bits() as i64).abs();
+
+        if err <= 1 {
+            Ok(())
+        } else {
+            Err(err as u64)
+        }
+    }
+}
+
+/// Math support for `f32`
+///
+/// This trait is sealed and cannot be implemented outside of `libm`.
+pub trait F32Ext: private::Sealed + Sized {
+    fn floor(self) -> Self;
+
+    fn ceil(self) -> Self;
+
+    fn round(self) -> Self;
+
+    fn trunc(self) -> Self;
+
+    fn fdim(self, rhs: Self) -> Self;
+
+    fn fract(self) -> Self;
+
+    fn abs(self) -> Self;
+
+    // NOTE depends on unstable intrinsics::copysignf32
+    // fn signum(self) -> Self;
+
+    fn mul_add(self, a: Self, b: Self) -> Self;
+
+    fn div_euc(self, rhs: Self) -> Self;
+
+    fn mod_euc(self, rhs: Self) -> Self;
+
+    // NOTE depends on unstable intrinsics::powif32
+    // fn powi(self, n: i32) -> Self;
+
+    fn powf(self, n: Self) -> Self;
+
+    fn sqrt(self) -> Self;
+
+    fn exp(self) -> Self;
+
+    fn exp2(self) -> Self;
+
+    fn ln(self) -> Self;
+
+    fn log(self, base: Self) -> Self;
+
+    fn log2(self) -> Self;
+
+    fn log10(self) -> Self;
+
+    fn cbrt(self) -> Self;
+
+    fn hypot(self, other: Self) -> Self;
+
+    fn sin(self) -> Self;
+
+    fn cos(self) -> Self;
+
+    fn tan(self) -> Self;
+
+    fn asin(self) -> Self;
+
+    fn acos(self) -> Self;
+
+    fn atan(self) -> Self;
+
+    fn atan2(self, other: Self) -> Self;
+
+    fn sin_cos(self) -> (Self, Self);
+
+    fn exp_m1(self) -> Self;
+
+    fn ln_1p(self) -> Self;
+
+    fn sinh(self) -> Self;
+
+    fn cosh(self) -> Self;
+
+    fn tanh(self) -> Self;
+
+    fn asinh(self) -> Self;
+
+    fn acosh(self) -> Self;
+
+    fn atanh(self) -> Self;
+
+    fn min(self, other: Self) -> Self;
+
+    fn max(self, other: Self) -> Self;
+}
+
+impl F32Ext for f32 {
+    #[inline]
+    fn floor(self) -> Self {
+        floorf(self)
+    }
+
+    #[inline]
+    fn ceil(self) -> Self {
+        ceilf(self)
+    }
+
+    #[inline]
+    fn round(self) -> Self {
+        roundf(self)
+    }
+
+    #[inline]
+    fn trunc(self) -> Self {
+        truncf(self)
+    }
+
+    #[inline]
+    fn fdim(self, rhs: Self) -> Self {
+        fdimf(self, rhs)
+    }
+
+    #[inline]
+    fn fract(self) -> Self {
+        self - self.trunc()
+    }
+
+    #[inline]
+    fn abs(self) -> Self {
+        fabsf(self)
+    }
+
+    #[inline]
+    fn mul_add(self, a: Self, b: Self) -> Self {
+        fmaf(self, a, b)
+    }
+
+    #[inline]
+    fn div_euc(self, rhs: Self) -> Self {
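+        // Adjust the truncated quotient so the Euclidean remainder
+        // (`self.mod_euc(rhs)`) stays non-negative.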
+        let q = (self / rhs).trunc();
+        if self % rhs < 0.0 {
+            return if rhs > 0.0 { q - 1.0 } else { q + 1.0 };
+        }
+        q
+    }
+
+    #[inline]
+    fn mod_euc(self, rhs: f32) -> f32 {
+        let r = self % rhs;
+        if r < 0.0 {
+            r + rhs.abs()
+        } else {
+            r
+        }
+    }
+
+    #[inline]
+    fn powf(self, n: Self) -> Self {
+        powf(self, n)
+    }
+
+    #[inline]
+    fn sqrt(self) -> Self {
+        sqrtf(self)
+    }
+
+    #[inline]
+    fn exp(self) -> Self {
+        expf(self)
+    }
+
+    #[inline]
+    fn exp2(self) -> Self {
+        exp2f(self)
+    }
+
+    #[inline]
+    fn ln(self) -> Self {
+        logf(self)
+    }
+
+    #[inline]
+    fn log(self, base: Self) -> Self {
+        self.ln() / base.ln()
+    }
+
+    #[inline]
+    fn log2(self) -> Self {
+        log2f(self)
+    }
+
+    #[inline]
+    fn log10(self) -> Self {
+        log10f(self)
+    }
+
+    #[inline]
+    fn cbrt(self) -> Self {
+        cbrtf(self)
+    }
+
+    #[inline]
+    fn hypot(self, other: Self) -> Self {
+        hypotf(self, other)
+    }
+
+    #[inline]
+    fn sin(self) -> Self {
+        sinf(self)
+    }
+
+    #[inline]
+    fn cos(self) -> Self {
+        cosf(self)
+    }
+
+    #[inline]
+    fn tan(self) -> Self {
+        tanf(self)
+    }
+
+    #[inline]
+    fn asin(self) -> Self {
+        asinf(self)
+    }
+
+    #[inline]
+    fn acos(self) -> Self {
+        acosf(self)
+    }
+
+    #[inline]
+    fn atan(self) -> Self {
+        atanf(self)
+    }
+
+    #[inline]
+    fn atan2(self, other: Self) -> Self {
+        atan2f(self, other)
+    }
+
+    #[inline]
+    fn sin_cos(self) -> (Self, Self) {
+        sincosf(self)
+    }
+
+    #[inline]
+    fn exp_m1(self) -> Self {
+        expm1f(self)
+    }
+
+    #[inline]
+    fn ln_1p(self) -> Self {
+        log1pf(self)
+    }
+
+    #[inline]
+    fn sinh(self) -> Self {
+        sinhf(self)
+    }
+
+    #[inline]
+    fn cosh(self) -> Self {
+        coshf(self)
+    }
+
+    #[inline]
+    fn tanh(self) -> Self {
+        tanhf(self)
+    }
+
+    #[inline]
+    fn asinh(self) -> Self {
+        asinhf(self)
+    }
+
+    #[inline]
+    fn acosh(self) -> Self {
+        acoshf(self)
+    }
+
+    #[inline]
+    fn atanh(self) -> Self {
+        atanhf(self)
+    }
+
+    #[inline]
+    fn min(self, other: Self) -> Self {
+        fminf(self, other)
+    }
+
+    #[inline]
+    fn max(self, other: Self) -> Self {
+        fmaxf(self, other)
+    }
+}
+
+/// Math support for `f64`
+///
+/// This trait is sealed and cannot be implemented outside of `libm`.
+pub trait F64Ext: private::Sealed + Sized {
+    fn floor(self) -> Self;
+
+    fn ceil(self) -> Self;
+
+    fn round(self) -> Self;
+
+    fn trunc(self) -> Self;
+
+    fn fdim(self, rhs: Self) -> Self;
+
+    fn fract(self) -> Self;
+
+    fn abs(self) -> Self;
+
+    // NOTE depends on unstable intrinsics::copysignf64
+    // fn signum(self) -> Self;
+
+    fn mul_add(self, a: Self, b: Self) -> Self;
+
+    fn div_euc(self, rhs: Self) -> Self;
+
+    fn mod_euc(self, rhs: Self) -> Self;
+
+    // NOTE depends on unstable intrinsics::powif64
+    // fn powi(self, n: i32) -> Self;
+
+    fn powf(self, n: Self) -> Self;
+
+    fn sqrt(self) -> Self;
+
+    fn exp(self) -> Self;
+
+    fn exp2(self) -> Self;
+
+    fn ln(self) -> Self;
+
+    fn log(self, base: Self) -> Self;
+
+    fn log2(self) -> Self;
+
+    fn log10(self) -> Self;
+
+    fn cbrt(self) -> Self;
+
+    fn hypot(self, other: Self) -> Self;
+
+    fn sin(self) -> Self;
+
+    fn cos(self) -> Self;
+
+    fn tan(self) -> Self;
+
+    fn asin(self) -> Self;
+
+    fn acos(self) -> Self;
+
+    fn atan(self) -> Self;
+
+    fn atan2(self, other: Self) -> Self;
+
+    fn sin_cos(self) -> (Self, Self);
+
+    fn exp_m1(self) -> Self;
+
+    fn ln_1p(self) -> Self;
+
+    fn sinh(self) -> Self;
+
+    fn cosh(self) -> Self;
+
+    fn tanh(self) -> Self;
+
+    fn asinh(self) -> Self;
+
+    fn acosh(self) -> Self;
+
+    fn atanh(self) -> Self;
+
+    fn min(self, other: Self) -> Self;
+
+    fn max(self, other: Self) -> Self;
+}
+
+impl F64Ext for f64 {
+    #[inline]
+    fn floor(self) -> Self {
+        floor(self)
+    }
+
+    #[inline]
+    fn ceil(self) -> Self {
+        ceil(self)
+    }
+
+    #[inline]
+    fn round(self) -> Self {
+        round(self)
+    }
+
+    #[inline]
+    fn trunc(self) -> Self {
+        trunc(self)
+    }
+
+    #[inline]
+    fn fdim(self, rhs: Self) -> Self {
+        fdim(self, rhs)
+    }
+
+    #[inline]
+    fn fract(self) -> Self {
+        self - self.trunc()
+    }
+
+    #[inline]
+    fn abs(self) -> Self {
+        fabs(self)
+    }
+
+    #[inline]
+    fn mul_add(self, a: Self, b: Self) -> Self {
+        fma(self, a, b)
+    }
+
+    #[inline]
+    fn div_euc(self, rhs: Self) -> Self {
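+        // As for `f32`: adjust the truncated quotient so the Euclidean
+        // remainder stays non-negative.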
+        let q = (self / rhs).trunc();
+        if self % rhs < 0.0 {
+            return if rhs > 0.0 { q - 1.0 } else { q + 1.0 };
+        }
+        q
+    }
+
+    #[inline]
+    fn mod_euc(self, rhs: f64) -> f64 {
+        let r = self % rhs;
+        if r < 0.0 {
+            r + rhs.abs()
+        } else {
+            r
+        }
+    }
+
+    #[inline]
+    fn powf(self, n: Self) -> Self {
+        pow(self, n)
+    }
+
+    #[inline]
+    fn sqrt(self) -> Self {
+        sqrt(self)
+    }
+
+    #[inline]
+    fn exp(self) -> Self {
+        exp(self)
+    }
+
+    #[inline]
+    fn exp2(self) -> Self {
+        exp2(self)
+    }
+
+    #[inline]
+    fn ln(self) -> Self {
+        log(self)
+    }
+
+    #[inline]
+    fn log(self, base: Self) -> Self {
+        self.ln() / base.ln()
+    }
+
+    #[inline]
+    fn log2(self) -> Self {
+        log2(self)
+    }
+
+    #[inline]
+    fn log10(self) -> Self {
+        log10(self)
+    }
+
+    #[inline]
+    fn cbrt(self) -> Self {
+        cbrt(self)
+    }
+
+    #[inline]
+    fn hypot(self, other: Self) -> Self {
+        hypot(self, other)
+    }
+
+    #[inline]
+    fn sin(self) -> Self {
+        sin(self)
+    }
+
+    #[inline]
+    fn cos(self) -> Self {
+        cos(self)
+    }
+
+    #[inline]
+    fn tan(self) -> Self {
+        tan(self)
+    }
+
+    #[inline]
+    fn asin(self) -> Self {
+        asin(self)
+    }
+
+    #[inline]
+    fn acos(self) -> Self {
+        acos(self)
+    }
+
+    #[inline]
+    fn atan(self) -> Self {
+        atan(self)
+    }
+
+    #[inline]
+    fn atan2(self, other: Self) -> Self {
+        atan2(self, other)
+    }
+
+    #[inline]
+    fn sin_cos(self) -> (Self, Self) {
+        sincos(self)
+    }
+
+    #[inline]
+    fn exp_m1(self) -> Self {
+        expm1(self)
+    }
+
+    #[inline]
+    fn ln_1p(self) -> Self {
+        log1p(self)
+    }
+
+    #[inline]
+    fn sinh(self) -> Self {
+        sinh(self)
+    }
+
+    #[inline]
+    fn cosh(self) -> Self {
+        cosh(self)
+    }
+
+    #[inline]
+    fn tanh(self) -> Self {
+        tanh(self)
+    }
+
+    #[inline]
+    fn asinh(self) -> Self {
+        asinh(self)
+    }
+
+    #[inline]
+    fn acosh(self) -> Self {
+        acosh(self)
+    }
+
+    #[inline]
+    fn atanh(self) -> Self {
+        atanh(self)
+    }
+
+    #[inline]
+    fn min(self, other: Self) -> Self {
+        fmin(self, other)
+    }
+
+    #[inline]
+    fn max(self, other: Self) -> Self {
+        fmax(self, other)
+    }
+}
+
+mod private {
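+    // Sealed-trait pattern: this module is private, so downstream crates
+    // cannot implement `Sealed`, and therefore cannot implement
+    // `F32Ext`/`F64Ext` either.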
+    pub trait Sealed {}
+
+    impl Sealed for f32 {}
+    impl Sealed for f64 {}
+}
+
+#[cfg(all(test, feature = "musl-reference-tests"))]
+include!(concat!(env!("OUT_DIR"), "/musl-tests.rs"));
+
+
\ No newline at end of file diff --git a/src/libm/math/acos.rs.html b/src/libm/math/acos.rs.html new file mode 100644 index 000000000..9f9379d1d --- /dev/null +++ b/src/libm/math/acos.rs.html @@ -0,0 +1,231 @@ +acos.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/e_acos.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/* acos(x)
+ * Method :
+ *      acos(x)  = pi/2 - asin(x)
+ *      acos(-x) = pi/2 + asin(x)
+ * For |x|<=0.5
+ *      acos(x) = pi/2 - (x + x*x^2*R(x^2))     (see asin.c)
+ * For x>0.5
+ *      acos(x) = pi/2 - (pi/2 - 2asin(sqrt((1-x)/2)))
+ *              = 2asin(sqrt((1-x)/2))
+ *              = 2s + 2s*z*R(z)        ...z=(1-x)/2, s=sqrt(z)
+ *              = 2f + (2c + 2s*z*R(z))
+ *     where f=hi part of s, and c = (z-f*f)/(s+f) is the correction term
+ *     for f so that f+c ~ sqrt(z).
+ * For x<-0.5
+ *      acos(x) = pi - 2asin(sqrt((1-|x|)/2))
+ *              = pi - 2*(s+s*z*R(z)), where z=(1-|x|)/2, s=sqrt(z)
+ *
+ * Special cases:
+ *      if x is NaN, return x itself;
+ *      if |x|>1, return NaN with invalid signal.
+ *
+ * Function needed: sqrt
+ */
+
+use super::sqrt;
+
+const PIO2_HI: f64 = 1.57079632679489655800e+00; /* 0x3FF921FB, 0x54442D18 */
+const PIO2_LO: f64 = 6.12323399573676603587e-17; /* 0x3C91A626, 0x33145C07 */
+const PS0: f64 = 1.66666666666666657415e-01; /* 0x3FC55555, 0x55555555 */
+const PS1: f64 = -3.25565818622400915405e-01; /* 0xBFD4D612, 0x03EB6F7D */
+const PS2: f64 = 2.01212532134862925881e-01; /* 0x3FC9C155, 0x0E884455 */
+const PS3: f64 = -4.00555345006794114027e-02; /* 0xBFA48228, 0xB5688F3B */
+const PS4: f64 = 7.91534994289814532176e-04; /* 0x3F49EFE0, 0x7501B288 */
+const PS5: f64 = 3.47933107596021167570e-05; /* 0x3F023DE1, 0x0DFDF709 */
+const QS1: f64 = -2.40339491173441421878e+00; /* 0xC0033A27, 0x1C8A2D4B */
+const QS2: f64 = 2.02094576023350569471e+00; /* 0x40002AE5, 0x9C598AC8 */
+const QS3: f64 = -6.88283971605453293030e-01; /* 0xBFE6066C, 0x1B8D0159 */
+const QS4: f64 = 7.70381505559019352791e-02; /* 0x3FB3B8C5, 0xB12E9282 */
+
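+// Computes z*R(z) from the polynomial approximation asin(x) ~ x + x*z*R(z),
+// where z = x*x (see the method notes above).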
+#[inline]
+fn r(z: f64) -> f64 {
+    let p: f64 = z * (PS0 + z * (PS1 + z * (PS2 + z * (PS3 + z * (PS4 + z * PS5)))));
+    let q: f64 = 1.0 + z * (QS1 + z * (QS2 + z * (QS3 + z * QS4)));
+    p / q
+}
+
+/// Arccosine (f64)
+///
+/// Computes the inverse cosine (arc cosine) of the input value.
+/// Arguments must be in the range -1 to 1.
+/// Returns values in radians, in the range of 0 to pi.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn acos(x: f64) -> f64 {
+    let x1p_120f = f64::from_bits(0x3870000000000000); // 0x1p-120 === 2 ^ -120
+    let z: f64;
+    let w: f64;
+    let s: f64;
+    let c: f64;
+    let df: f64;
+    let hx: u32;
+    let ix: u32;
+
+    hx = (x.to_bits() >> 32) as u32;
+    ix = hx & 0x7fffffff;
+    /* |x| >= 1 or nan */
+    if ix >= 0x3ff00000 {
+        let lx: u32 = x.to_bits() as u32;
+
+        if ((ix - 0x3ff00000) | lx) == 0 {
+            /* acos(1)=0, acos(-1)=pi */
+            if (hx >> 31) != 0 {
+                return 2. * PIO2_HI + x1p_120f;
+            }
+            return 0.;
+        }
+        return 0. / (x - x);
+    }
+    /* |x| < 0.5 */
+    if ix < 0x3fe00000 {
+        if ix <= 0x3c600000 {
+            /* |x| < 2**-57 */
+            return PIO2_HI + x1p_120f;
+        }
+        return PIO2_HI - (x - (PIO2_LO - x * r(x * x)));
+    }
+    /* x < -0.5 */
+    if (hx >> 31) != 0 {
+        z = (1.0 + x) * 0.5;
+        s = sqrt(z);
+        w = r(z) * s - PIO2_LO;
+        return 2. * (PIO2_HI - (s + w));
+    }
+    /* x > 0.5 */
+    z = (1.0 - x) * 0.5;
+    s = sqrt(z);
+    // Set the low 4 bytes to zero
+    df = f64::from_bits(s.to_bits() & 0xff_ff_ff_ff_00_00_00_00);
+
+    c = (z - df * df) / (s + df);
+    w = r(z) * s + c;
+    2. * (df + w)
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/acosf.rs.html b/src/libm/math/acosf.rs.html new file mode 100644 index 000000000..75a3e632f --- /dev/null +++ b/src/libm/math/acosf.rs.html @@ -0,0 +1,165 @@ +acosf.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/e_acosf.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::sqrtf::sqrtf;
+
+const PIO2_HI: f32 = 1.5707962513e+00; /* 0x3fc90fda */
+const PIO2_LO: f32 = 7.5497894159e-08; /* 0x33a22168 */
+const P_S0: f32 = 1.6666586697e-01;
+const P_S1: f32 = -4.2743422091e-02;
+const P_S2: f32 = -8.6563630030e-03;
+const Q_S1: f32 = -7.0662963390e-01;
+
+#[inline]
+fn r(z: f32) -> f32 {
+    let p = z * (P_S0 + z * (P_S1 + z * P_S2));
+    let q = 1. + z * Q_S1;
+    p / q
+}
+
+/// Arccosine (f32)
+///
+/// Computes the inverse cosine (arc cosine) of the input value.
+/// Arguments must be in the range -1 to 1.
+/// Returns values in radians, in the range of 0 to pi.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn acosf(x: f32) -> f32 {
+    let x1p_120 = f32::from_bits(0x03800000); // 0x1p-120 === 2 ^ (-120)
+
+    let z: f32;
+    let w: f32;
+    let s: f32;
+
+    let mut hx = x.to_bits();
+    let ix = hx & 0x7fffffff;
+    /* |x| >= 1 or nan */
+    if ix >= 0x3f800000 {
+        if ix == 0x3f800000 {
+            if (hx >> 31) != 0 {
+                return 2. * PIO2_HI + x1p_120;
+            }
+            return 0.;
+        }
+        return 0. / (x - x);
+    }
+    /* |x| < 0.5 */
+    if ix < 0x3f000000 {
+        if ix <= 0x32800000 {
+            /* |x| < 2**-26 */
+            return PIO2_HI + x1p_120;
+        }
+        return PIO2_HI - (x - (PIO2_LO - x * r(x * x)));
+    }
+    /* x < -0.5 */
+    if (hx >> 31) != 0 {
+        z = (1. + x) * 0.5;
+        s = sqrtf(z);
+        w = r(z) * s - PIO2_LO;
+        return 2. * (PIO2_HI - (s + w));
+    }
+    /* x > 0.5 */
+    z = (1. - x) * 0.5;
+    s = sqrtf(z);
+    hx = s.to_bits();
+    let df = f32::from_bits(hx & 0xfffff000);
+    let c = (z - df * df) / (s + df);
+    w = r(z) * s + c;
+    2. * (df + w)
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/acosh.rs.html b/src/libm/math/acosh.rs.html new file mode 100644 index 000000000..d0b27f065 --- /dev/null +++ b/src/libm/math/acosh.rs.html @@ -0,0 +1,55 @@ +acosh.rs.html -- source
+
+use super::{log, log1p, sqrt};
+
+const LN2: f64 = 0.693147180559945309417232121458176568; /* 0x3fe62e42,  0xfefa39ef*/
+
+/// Inverse hyperbolic cosine (f64)
+///
+/// Calculates the inverse hyperbolic cosine of `x`.
+/// Is defined as `log(x + sqrt(x*x-1))`.
+/// `x` must be a number greater than or equal to 1.
+pub fn acosh(x: f64) -> f64 {
+    let u = x.to_bits();
+    let e = ((u >> 52) as usize) & 0x7ff;
+
+    /* x < 1 domain error is handled in the called functions */
+
+    if e < 0x3ff + 1 {
+        /* |x| < 2, up to 2ulp error in [1,1.125] */
+        return log1p(x - 1.0 + sqrt((x - 1.0) * (x - 1.0) + 2.0 * (x - 1.0)));
+    }
+    if e < 0x3ff + 26 {
+        /* |x| < 0x1p26 */
+        return log(2.0 * x - 1.0 / (x + sqrt(x * x - 1.0)));
+    }
+    /* |x| >= 0x1p26 or nan */
+    return log(x) + LN2;
+}
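+
+// Editor's sketch (not in the upstream source): acosh is the inverse of cosh
+// on [1, inf); below 1 the sqrt goes negative and the result is NaN.
+#[cfg(test)]
+mod tests {
+    use super::acosh;
+
+    #[test]
+    fn sanity_check() {
+        assert_eq!(acosh(1.0), 0.0);
+        // acosh(2) = log(2 + sqrt(3))
+        assert!((acosh(2.0) - 1.3169578969248166).abs() < 1e-15);
+        assert!(acosh(0.5).is_nan()); // x < 1 is a domain error
+    }
+}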
+
+
\ No newline at end of file diff --git a/src/libm/math/acoshf.rs.html b/src/libm/math/acoshf.rs.html new file mode 100644 index 000000000..d30e3fd3a --- /dev/null +++ b/src/libm/math/acoshf.rs.html @@ -0,0 +1,53 @@ +acoshf.rs.html -- source
+
+use super::{log1pf, logf, sqrtf};
+
+const LN2: f32 = 0.693147180559945309417232121458176568;
+
+/// Inverse hyperbolic cosine (f32)
+///
+/// Calculates the inverse hyperbolic cosine of `x`.
+/// Is defined as `log(x + sqrt(x*x-1))`.
+/// `x` must be a number greater than or equal to 1.
+pub fn acoshf(x: f32) -> f32 {
+    let u = x.to_bits();
+    let a = u & 0x7fffffff;
+
+    if a < 0x3f800000 + (1 << 23) {
+        /* |x| < 2, invalid if x < 1 or nan */
+        /* up to 2ulp error in [1,1.125] */
+        return log1pf(x - 1.0 + sqrtf((x - 1.0) * (x - 1.0) + 2.0 * (x - 1.0)));
+    }
+    if a < 0x3f800000 + (12 << 23) {
+        /* |x| < 0x1p12 */
+        return logf(2.0 * x - 1.0 / (x + sqrtf(x * x - 1.0)));
+    }
+    /* x >= 0x1p12 */
+    return logf(x) + LN2;
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/asin.rs.html b/src/libm/math/asin.rs.html new file mode 100644 index 000000000..38ecbf351 --- /dev/null +++ b/src/libm/math/asin.rs.html @@ -0,0 +1,245 @@ +asin.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/e_asin.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/* asin(x)
+ * Method :
+ *      Since  asin(x) = x + x^3/6 + x^5*3/40 + x^7*15/336 + ...
+ *      we approximate asin(x) on [0,0.5] by
+ *              asin(x) = x + x*x^2*R(x^2)
+ *      where
+ *              R(x^2) is a rational approximation of (asin(x)-x)/x^3
+ *      and its remez error is bounded by
+ *              |(asin(x)-x)/x^3 - R(x^2)| < 2^(-58.75)
+ *
+ *      For x in [0.5,1]
+ *              asin(x) = pi/2-2*asin(sqrt((1-x)/2))
+ *      Let y = (1-x), z = y/2, s := sqrt(z), and pio2_hi+pio2_lo=pi/2;
+ *      then for x>0.98
+ *              asin(x) = pi/2 - 2*(s+s*z*R(z))
+ *                      = pio2_hi - (2*(s+s*z*R(z)) - pio2_lo)
+ *      For x<=0.98, let pio4_hi = pio2_hi/2, then
+ *              f = hi part of s;
+ *              c = sqrt(z) - f = (z-f*f)/(s+f)         ...f+c=sqrt(z)
+ *      and
+ *              asin(x) = pi/2 - 2*(s+s*z*R(z))
+ *                      = pio4_hi+(pio4-2s)-(2s*z*R(z)-pio2_lo)
+ *                      = pio4_hi+(pio4-2f)-(2s*z*R(z)-(pio2_lo-2c))
+ *
+ * Special cases:
+ *      if x is NaN, return x itself;
+ *      if |x|>1, return NaN with invalid signal.
+ *
+ */
+
+use super::{fabs, get_high_word, get_low_word, sqrt, with_set_low_word};
+
+const PIO2_HI: f64 = 1.57079632679489655800e+00; /* 0x3FF921FB, 0x54442D18 */
+const PIO2_LO: f64 = 6.12323399573676603587e-17; /* 0x3C91A626, 0x33145C07 */
+/* coefficients for R(x^2) */
+const P_S0: f64 = 1.66666666666666657415e-01; /* 0x3FC55555, 0x55555555 */
+const P_S1: f64 = -3.25565818622400915405e-01; /* 0xBFD4D612, 0x03EB6F7D */
+const P_S2: f64 = 2.01212532134862925881e-01; /* 0x3FC9C155, 0x0E884455 */
+const P_S3: f64 = -4.00555345006794114027e-02; /* 0xBFA48228, 0xB5688F3B */
+const P_S4: f64 = 7.91534994289814532176e-04; /* 0x3F49EFE0, 0x7501B288 */
+const P_S5: f64 = 3.47933107596021167570e-05; /* 0x3F023DE1, 0x0DFDF709 */
+const Q_S1: f64 = -2.40339491173441421878e+00; /* 0xC0033A27, 0x1C8A2D4B */
+const Q_S2: f64 = 2.02094576023350569471e+00; /* 0x40002AE5, 0x9C598AC8 */
+const Q_S3: f64 = -6.88283971605453293030e-01; /* 0xBFE6066C, 0x1B8D0159 */
+const Q_S4: f64 = 7.70381505559019352791e-02; /* 0x3FB3B8C5, 0xB12E9282 */
+
+#[inline]
+fn comp_r(z: f64) -> f64 {
+    let p = z * (P_S0 + z * (P_S1 + z * (P_S2 + z * (P_S3 + z * (P_S4 + z * P_S5)))));
+    let q = 1.0 + z * (Q_S1 + z * (Q_S2 + z * (Q_S3 + z * Q_S4)));
+    p / q
+}
+
+/// Arcsine (f64)
+///
+/// Computes the inverse sine (arc sine) of the argument `x`.
+/// Arguments to asin must be in the range -1 to 1.
+/// Returns values in radians, in the range of -pi/2 to pi/2.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn asin(mut x: f64) -> f64 {
+    let z: f64;
+    let r: f64;
+    let s: f64;
+    let hx: u32;
+    let ix: u32;
+
+    hx = get_high_word(x);
+    ix = hx & 0x7fffffff;
+    /* |x| >= 1 or nan */
+    if ix >= 0x3ff00000 {
+        let lx: u32;
+        lx = get_low_word(x);
+        if ((ix - 0x3ff00000) | lx) == 0 {
+            /* asin(1) = +-pi/2 with inexact */
+            return x * PIO2_HI + f64::from_bits(0x3870000000000000);
+        } else {
+            return 0.0 / (x - x);
+        }
+    }
+    /* |x| < 0.5 */
+    if ix < 0x3fe00000 {
+        /* if 0x1p-1022 <= |x| < 0x1p-26, avoid raising underflow */
+        if ix < 0x3e500000 && ix >= 0x00100000 {
+            return x;
+        } else {
+            return x + x * comp_r(x * x);
+        }
+    }
+    /* 1 > |x| >= 0.5 */
+    z = (1.0 - fabs(x)) * 0.5;
+    s = sqrt(z);
+    r = comp_r(z);
+    if ix >= 0x3fef3333 {
+        /* if |x| > 0.975 */
+        x = PIO2_HI - (2. * (s + s * r) - PIO2_LO);
+    } else {
+        let f: f64;
+        let c: f64;
+        /* f+c = sqrt(z) */
+        f = with_set_low_word(s, 0);
+        c = (z - f * f) / (s + f);
+        x = 0.5 * PIO2_HI - (2.0 * s * r - (PIO2_LO - 2.0 * c) - (0.5 * PIO2_HI - 2.0 * f));
+    }
+    if hx >> 31 != 0 {
+        -x
+    } else {
+        x
+    }
+}
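+
+// Editor's sketch (not in the upstream source), mirroring the sanity tests
+// used elsewhere in this crate.
+#[cfg(test)]
+mod tests {
+    use super::asin;
+    use core::f64;
+
+    #[test]
+    fn sanity_check() {
+        assert_eq!(asin(0.0), 0.0);
+        assert!((asin(1.0) - f64::consts::FRAC_PI_2).abs() < 1e-15);
+        assert!((asin(-0.5) + f64::consts::FRAC_PI_6).abs() < 1e-15);
+        assert!(asin(1.5).is_nan()); // |x| > 1 is a domain error
+    }
+}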
+
+
\ No newline at end of file diff --git a/src/libm/math/asinf.rs.html b/src/libm/math/asinf.rs.html new file mode 100644 index 000000000..20c16337f --- /dev/null +++ b/src/libm/math/asinf.rs.html @@ -0,0 +1,151 @@ +asinf.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/e_asinf.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::fabsf::fabsf;
+use super::sqrt::sqrt;
+
+const PIO2: f64 = 1.570796326794896558e+00;
+
+/* coefficients for R(x^2) */
+const P_S0: f32 = 1.6666586697e-01;
+const P_S1: f32 = -4.2743422091e-02;
+const P_S2: f32 = -8.6563630030e-03;
+const Q_S1: f32 = -7.0662963390e-01;
+
+#[inline]
+fn r(z: f32) -> f32 {
+    let p = z * (P_S0 + z * (P_S1 + z * P_S2));
+    let q = 1. + z * Q_S1;
+    p / q
+}
+
+/// Arcsine (f32)
+///
+/// Computes the inverse sine (arc sine) of the argument `x`.
+/// Arguments to asin must be in the range -1 to 1.
+/// Returns values in radians, in the range of -pi/2 to pi/2.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn asinf(mut x: f32) -> f32 {
+    let x1p_120 = f64::from_bits(0x3870000000000000); // 0x1p-120 === 2 ^ (-120)
+
+    let hx = x.to_bits();
+    let ix = hx & 0x7fffffff;
+
+    if ix >= 0x3f800000 {
+        /* |x| >= 1 */
+        if ix == 0x3f800000 {
+            /* |x| == 1 */
+            return ((x as f64) * PIO2 + x1p_120) as f32; /* asin(+-1) = +-pi/2 with inexact */
+        }
+        return 0. / (x - x); /* asin(|x|>1) is NaN */
+    }
+
+    if ix < 0x3f000000 {
+        /* |x| < 0.5 */
+        /* if 0x1p-126 <= |x| < 0x1p-12, avoid raising underflow */
+        if (ix < 0x39800000) && (ix >= 0x00800000) {
+            return x;
+        }
+        return x + x * r(x * x);
+    }
+
+    /* 1 > |x| >= 0.5 */
+    let z = (1. - fabsf(x)) * 0.5;
+    let s = sqrt(z as f64);
+    x = (PIO2 - 2. * (s + s * (r(z) as f64))) as f32;
+    if (hx >> 31) != 0 {
+        -x
+    } else {
+        x
+    }
+}
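+
+// Editor's note: unlike most float variants in this crate, asinf routes its
+// reduction through the f64 sqrt, keeping the single-precision result well
+// within tolerance. A small illustrative check (not in the upstream source):
+#[cfg(test)]
+mod tests {
+    use super::asinf;
+    use core::f32;
+
+    #[test]
+    fn sanity_check() {
+        assert_eq!(asinf(0.0), 0.0);
+        assert!((asinf(1.0) - f32::consts::FRAC_PI_2).abs() < 1e-6);
+        assert!((asinf(0.5) - f32::consts::FRAC_PI_6).abs() < 1e-6);
+        assert!(asinf(2.0).is_nan());
+    }
+}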
+
+
\ No newline at end of file diff --git a/src/libm/math/asinh.rs.html b/src/libm/math/asinh.rs.html new file mode 100644 index 000000000..7e7aa7363 --- /dev/null +++ b/src/libm/math/asinh.rs.html @@ -0,0 +1,81 @@ +asinh.rs.html -- source
+
+use super::{log, log1p, sqrt};
+
+const LN2: f64 = 0.693147180559945309417232121458176568; /* 0x3fe62e42,  0xfefa39ef*/
+
+/* asinh(x) = sign(x)*log(|x|+sqrt(x*x+1)) ~= x - x^3/6 + o(x^5) */
+/// Inverse hyperbolic sine (f64)
+///
+/// Calculates the inverse hyperbolic sine of `x`.
+/// Is defined as `sgn(x)*log(|x|+sqrt(x*x+1))`.
+pub fn asinh(mut x: f64) -> f64 {
+    let mut u = x.to_bits();
+    let e = ((u >> 52) as usize) & 0x7ff;
+    let sign = (u >> 63) != 0;
+
+    /* |x| */
+    u &= (!0) >> 1;
+    x = f64::from_bits(u);
+
+    if e >= 0x3ff + 26 {
+        /* |x| >= 0x1p26 or inf or nan */
+        x = log(x) + LN2;
+    } else if e >= 0x3ff + 1 {
+        /* |x| >= 2 */
+        x = log(2.0 * x + 1.0 / (sqrt(x * x + 1.0) + x));
+    } else if e >= 0x3ff - 26 {
+        /* |x| >= 0x1p-26, up to 1.6ulp error in [0.125,0.5] */
+        x = log1p(x + x * x / (sqrt(x * x + 1.0) + 1.0));
+    } else {
+        /* |x| < 0x1p-26, raise inexact if x != 0 */
+        let x1p120 = f64::from_bits(0x4770000000000000);
+        force_eval!(x + x1p120);
+    }
+
+    if sign {
+        -x
+    } else {
+        x
+    }
+}
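+
+// Editor's sketch (not in the upstream source): asinh is odd and defined on
+// all of R, so there is no domain error to test, only the sign symmetry.
+#[cfg(test)]
+mod tests {
+    use super::asinh;
+
+    #[test]
+    fn sanity_check() {
+        assert_eq!(asinh(0.0), 0.0);
+        // asinh(1) = log(1 + sqrt(2))
+        assert!((asinh(1.0) - 0.881373587019543).abs() < 1e-15);
+        assert_eq!(asinh(-1.0), -asinh(1.0));
+    }
+}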
+
+
\ No newline at end of file diff --git a/src/libm/math/asinhf.rs.html b/src/libm/math/asinhf.rs.html new file mode 100644 index 000000000..46d0dc729 --- /dev/null +++ b/src/libm/math/asinhf.rs.html @@ -0,0 +1,79 @@ +asinhf.rs.html -- source
+
+use super::{log1pf, logf, sqrtf};
+
+const LN2: f32 = 0.693147180559945309417232121458176568;
+
+/* asinh(x) = sign(x)*log(|x|+sqrt(x*x+1)) ~= x - x^3/6 + o(x^5) */
+/// Inverse hyperbolic sine (f32)
+///
+/// Calculates the inverse hyperbolic sine of `x`.
+/// Is defined as `sgn(x)*log(|x|+sqrt(x*x+1))`.
+pub fn asinhf(mut x: f32) -> f32 {
+    let u = x.to_bits();
+    let i = u & 0x7fffffff;
+    let sign = (u >> 31) != 0;
+
+    /* |x| */
+    x = f32::from_bits(i);
+
+    if i >= 0x3f800000 + (12 << 23) {
+        /* |x| >= 0x1p12 or inf or nan */
+        x = logf(x) + LN2;
+    } else if i >= 0x3f800000 + (1 << 23) {
+        /* |x| >= 2 */
+        x = logf(2.0 * x + 1.0 / (sqrtf(x * x + 1.0) + x));
+    } else if i >= 0x3f800000 - (12 << 23) {
+        /* |x| >= 0x1p-12, up to 1.6ulp error in [0.125,0.5] */
+        x = log1pf(x + x * x / (sqrtf(x * x + 1.0) + 1.0));
+    } else {
+        /* |x| < 0x1p-12, raise inexact if x!=0 */
+        let x1p120 = f32::from_bits(0x7b800000);
+        force_eval!(x + x1p120);
+    }
+
+    if sign {
+        -x
+    } else {
+        x
+    }
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/atan.rs.html b/src/libm/math/atan.rs.html new file mode 100644 index 000000000..f17d27331 --- /dev/null +++ b/src/libm/math/atan.rs.html @@ -0,0 +1,373 @@ +atan.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/s_atan.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/* atan(x)
+ * Method
+ *   1. Reduce x to positive by atan(x) = -atan(-x).
+ *   2. According to the integer k=4t+0.25 chopped, t=x, the argument
+ *      is further reduced to one of the following intervals and the
+ *      arctangent of t is evaluated by the corresponding formula:
+ *
+ *      [0,7/16]      atan(x) = t-t^3*(a1+t^2*(a2+...(a10+t^2*a11)...))
+ *      [7/16,11/16]  atan(x) = atan(1/2) + atan( (t-0.5)/(1+t/2) )
+ *      [11/16,19/16] atan(x) = atan( 1 ) + atan( (t-1)/(1+t) )
+ *      [19/16,39/16] atan(x) = atan(3/2) + atan( (t-1.5)/(1+1.5t) )
+ *      [39/16,INF]   atan(x) = atan(INF) + atan( -1/t )
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+use super::fabs;
+use core::f64;
+
+const ATANHI: [f64; 4] = [
+    4.63647609000806093515e-01, /* atan(0.5)hi 0x3FDDAC67, 0x0561BB4F */
+    7.85398163397448278999e-01, /* atan(1.0)hi 0x3FE921FB, 0x54442D18 */
+    9.82793723247329054082e-01, /* atan(1.5)hi 0x3FEF730B, 0xD281F69B */
+    1.57079632679489655800e+00, /* atan(inf)hi 0x3FF921FB, 0x54442D18 */
+];
+
+const ATANLO: [f64; 4] = [
+    2.26987774529616870924e-17, /* atan(0.5)lo 0x3C7A2B7F, 0x222F65E2 */
+    3.06161699786838301793e-17, /* atan(1.0)lo 0x3C81A626, 0x33145C07 */
+    1.39033110312309984516e-17, /* atan(1.5)lo 0x3C700788, 0x7AF0CBBD */
+    6.12323399573676603587e-17, /* atan(inf)lo 0x3C91A626, 0x33145C07 */
+];
+
+const AT: [f64; 11] = [
+    3.33333333333329318027e-01,  /* 0x3FD55555, 0x5555550D */
+    -1.99999999998764832476e-01, /* 0xBFC99999, 0x9998EBC4 */
+    1.42857142725034663711e-01,  /* 0x3FC24924, 0x920083FF */
+    -1.11111104054623557880e-01, /* 0xBFBC71C6, 0xFE231671 */
+    9.09088713343650656196e-02,  /* 0x3FB745CD, 0xC54C206E */
+    -7.69187620504482999495e-02, /* 0xBFB3B0F2, 0xAF749A6D */
+    6.66107313738753120669e-02,  /* 0x3FB10D66, 0xA0D03D51 */
+    -5.83357013379057348645e-02, /* 0xBFADDE2D, 0x52DEFD9A */
+    4.97687799461593236017e-02,  /* 0x3FA97B4B, 0x24760DEB */
+    -3.65315727442169155270e-02, /* 0xBFA2B444, 0x2C6A6C2F */
+    1.62858201153657823623e-02,  /* 0x3F90AD3A, 0xE322DA11 */
+];
+
+/// Arctangent (f64)
+///
+/// Computes the inverse tangent (arc tangent) of the input value.
+/// Returns a value in radians, in the range of -pi/2 to pi/2.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn atan(x: f64) -> f64 {
+    let mut x = x;
+    let mut ix = (x.to_bits() >> 32) as u32;
+    let sign = ix >> 31;
+    ix &= 0x7fff_ffff;
+    if ix >= 0x4410_0000 {
+        if x.is_nan() {
+            return x;
+        }
+
+        let z = ATANHI[3] + f64::from_bits(0x3870000000000000); // 0x1p-120 === 2 ^ (-120)
+        return if sign != 0 { -z } else { z };
+    }
+
+    let id = if ix < 0x3fdc_0000 {
+        /* |x| < 0.4375 */
+        if ix < 0x3e40_0000 {
+            /* |x| < 2^-27 */
+            if ix < 0x0010_0000 {
+                /* raise underflow for subnormal x */
+                force_eval!(x as f32);
+            }
+
+            return x;
+        }
+
+        -1
+    } else {
+        x = fabs(x);
+        if ix < 0x3ff30000 {
+            /* |x| < 1.1875 */
+            if ix < 0x3fe60000 {
+                /* 7/16 <= |x| < 11/16 */
+                x = (2. * x - 1.) / (2. + x);
+                0
+            } else {
+                /* 11/16 <= |x| < 19/16 */
+                x = (x - 1.) / (x + 1.);
+                1
+            }
+        } else if ix < 0x40038000 {
+            /* |x| < 2.4375 */
+            x = (x - 1.5) / (1. + 1.5 * x);
+            2
+        } else {
+            /* 2.4375 <= |x| < 2^66 */
+            x = -1. / x;
+            3
+        }
+    };
+
+    let z = x * x;
+    let w = z * z;
+    /* break sum from i=0 to 10 AT[i]z**(i+1) into odd and even poly */
+    let s1 = z * (AT[0] + w * (AT[2] + w * (AT[4] + w * (AT[6] + w * (AT[8] + w * AT[10])))));
+    let s2 = w * (AT[1] + w * (AT[3] + w * (AT[5] + w * (AT[7] + w * AT[9]))));
+
+    if id < 0 {
+        return x - x * (s1 + s2);
+    }
+
+    let z = i!(ATANHI, id as usize) - (x * (s1 + s2) - i!(ATANLO, id as usize) - x);
+
+    if sign != 0 {
+        -z
+    } else {
+        z
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::atan;
+    use core::f64;
+
+    #[test]
+    fn sanity_check() {
+        for (input, answer) in [
+            (3.0_f64.sqrt() / 3.0, f64::consts::FRAC_PI_6),
+            (1.0, f64::consts::FRAC_PI_4),
+            (3.0_f64.sqrt(), f64::consts::FRAC_PI_3),
+            (-3.0_f64.sqrt() / 3.0, -f64::consts::FRAC_PI_6),
+            (-1.0, -f64::consts::FRAC_PI_4),
+            (-3.0_f64.sqrt(), -f64::consts::FRAC_PI_3),
+        ]
+        .iter()
+        {
+            assert!(
+                ((atan(*input) - answer) / answer).abs() < 1e-5,
+                "\natan({:.4}/16) = {:.4}, actual: {}",
+                input * 16.0,
+                answer,
+                atan(*input)
+            );
+        }
+    }
+
+    #[test]
+    fn zero() {
+        assert_eq!(atan(0.0), 0.0);
+    }
+
+    #[test]
+    fn infinity() {
+        assert_eq!(atan(f64::INFINITY), f64::consts::FRAC_PI_2);
+    }
+
+    #[test]
+    fn minus_infinity() {
+        assert_eq!(atan(f64::NEG_INFINITY), -f64::consts::FRAC_PI_2);
+    }
+
+    #[test]
+    fn nan() {
+        assert!(atan(f64::NAN).is_nan());
+    }
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/atan2.rs.html b/src/libm/math/atan2.rs.html new file mode 100644 index 000000000..5fcdd508b --- /dev/null +++ b/src/libm/math/atan2.rs.html @@ -0,0 +1,257 @@ +atan2.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/e_atan2.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+/* atan2(y,x)
+ * Method :
+ *      1. Reduce y to positive by atan2(y,x)=-atan2(-y,x).
+ *      2. Reduce x to positive by (if x and y are unexceptional):
+ *              ARG (x+iy) = arctan(y/x)           ... if x > 0,
+ *              ARG (x+iy) = pi - arctan[y/(-x)]   ... if x < 0,
+ *
+ * Special cases:
+ *
+ *      ATAN2((anything), NaN ) is NaN;
+ *      ATAN2(NAN , (anything) ) is NaN;
+ *      ATAN2(+-0, +(anything but NaN)) is +-0  ;
+ *      ATAN2(+-0, -(anything but NaN)) is +-pi ;
+ *      ATAN2(+-(anything but 0 and NaN), 0) is +-pi/2;
+ *      ATAN2(+-(anything but INF and NaN), +INF) is +-0 ;
+ *      ATAN2(+-(anything but INF and NaN), -INF) is +-pi;
+ *      ATAN2(+-INF,+INF ) is +-pi/4 ;
+ *      ATAN2(+-INF,-INF ) is +-3pi/4;
+ *      ATAN2(+-INF, (anything but,0,NaN, and INF)) is +-pi/2;
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+use super::atan;
+use super::fabs;
+
+const PI: f64 = 3.1415926535897931160E+00; /* 0x400921FB, 0x54442D18 */
+const PI_LO: f64 = 1.2246467991473531772E-16; /* 0x3CA1A626, 0x33145C07 */
+
+/// Arctangent of y/x (f64)
+///
+/// Computes the inverse tangent (arc tangent) of `y/x`.
+/// Produces the correct result even for angles near pi/2 or -pi/2 (that is, when `x` is near 0).
+/// Returns a value in radians, in the range of -pi to pi.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn atan2(y: f64, x: f64) -> f64 {
+    if x.is_nan() || y.is_nan() {
+        return x + y;
+    }
+    let mut ix = (x.to_bits() >> 32) as u32;
+    let lx = x.to_bits() as u32;
+    let mut iy = (y.to_bits() >> 32) as u32;
+    let ly = y.to_bits() as u32;
+    if ((ix.wrapping_sub(0x3ff00000)) | lx) == 0 {
+        /* x = 1.0 */
+        return atan(y);
+    }
+    let m = ((iy >> 31) & 1) | ((ix >> 30) & 2); /* 2*sign(x)+sign(y) */
+    ix &= 0x7fffffff;
+    iy &= 0x7fffffff;
+
+    /* when y = 0 */
+    if (iy | ly) == 0 {
+        return match m {
+            0 | 1 => y, /* atan(+-0,+anything)=+-0 */
+            2 => PI,    /* atan(+0,-anything) = PI */
+            _ => -PI,   /* atan(-0,-anything) =-PI */
+        };
+    }
+    /* when x = 0 */
+    if (ix | lx) == 0 {
+        return if m & 1 != 0 { -PI / 2.0 } else { PI / 2.0 };
+    }
+    /* when x is INF */
+    if ix == 0x7ff00000 {
+        if iy == 0x7ff00000 {
+            return match m {
+                0 => PI / 4.0,        /* atan(+INF,+INF) */
+                1 => -PI / 4.0,       /* atan(-INF,+INF) */
+                2 => 3.0 * PI / 4.0,  /* atan(+INF,-INF) */
+                _ => -3.0 * PI / 4.0, /* atan(-INF,-INF) */
+            };
+        } else {
+            return match m {
+                0 => 0.0,  /* atan(+...,+INF) */
+                1 => -0.0, /* atan(-...,+INF) */
+                2 => PI,   /* atan(+...,-INF) */
+                _ => -PI,  /* atan(-...,-INF) */
+            };
+        }
+    }
+    /* |y/x| > 0x1p64 */
+    if ix.wrapping_add(64 << 20) < iy || iy == 0x7ff00000 {
+        return if m & 1 != 0 { -PI / 2.0 } else { PI / 2.0 };
+    }
+
+    /* z = atan(|y/x|) without spurious underflow */
+    let z = if (m & 2 != 0) && iy.wrapping_add(64 << 20) < ix {
+        /* |y/x| < 0x1p-64, x<0 */
+        0.0
+    } else {
+        atan(fabs(y / x))
+    };
+    match m {
+        0 => z,                /* atan(+,+) */
+        1 => -z,               /* atan(-,+) */
+        2 => PI - (z - PI_LO), /* atan(+,-) */
+        _ => (z - PI_LO) - PI, /* atan(-,-) */
+    }
+}
+
+#[test]
+fn sanity_check() {
+    assert_eq!(atan2(0.0, 1.0), 0.0);
+    assert_eq!(atan2(0.0, -1.0), PI);
+    assert_eq!(atan2(-0.0, -1.0), -PI);
+    assert_eq!(atan2(3.0, 2.0), atan(3.0 / 2.0));
+    assert_eq!(atan2(2.0, -1.0), atan(2.0 / -1.0) + PI);
+    assert_eq!(atan2(-2.0, -1.0), atan(-2.0 / -1.0) - PI);
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/atan2f.rs.html b/src/libm/math/atan2f.rs.html new file mode 100644 index 000000000..43b306ef7 --- /dev/null +++ b/src/libm/math/atan2f.rs.html @@ -0,0 +1,187 @@ +atan2f.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/e_atan2f.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::atanf;
+use super::fabsf;
+
+const PI: f32 = 3.1415927410e+00; /* 0x40490fdb */
+const PI_LO: f32 = -8.7422776573e-08; /* 0xb3bbbd2e */
+
+/// Arctangent of y/x (f32)
+///
+/// Computes the inverse tangent (arc tangent) of `y/x`.
+/// Produces the correct result even for angles near pi/2 or -pi/2 (that is, when `x` is near 0).
+/// Returns a value in radians, in the range of -pi to pi.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn atan2f(y: f32, x: f32) -> f32 {
+    if x.is_nan() || y.is_nan() {
+        return x + y;
+    }
+    let mut ix = x.to_bits();
+    let mut iy = y.to_bits();
+
+    if ix == 0x3f800000 {
+        /* x=1.0 */
+        return atanf(y);
+    }
+    let m = ((iy >> 31) & 1) | ((ix >> 30) & 2); /* 2*sign(x)+sign(y) */
+    ix &= 0x7fffffff;
+    iy &= 0x7fffffff;
+
+    /* when y = 0 */
+    if iy == 0 {
+        return match m {
+            0 | 1 => y,   /* atan(+-0,+anything)=+-0 */
+            2 => PI,      /* atan(+0,-anything) = pi */
+            3 | _ => -PI, /* atan(-0,-anything) =-pi */
+        };
+    }
+    /* when x = 0 */
+    if ix == 0 {
+        return if m & 1 != 0 { -PI / 2. } else { PI / 2. };
+    }
+    /* when x is INF */
+    if ix == 0x7f800000 {
+        return if iy == 0x7f800000 {
+            match m {
+                0 => PI / 4.,           /* atan(+INF,+INF) */
+                1 => -PI / 4.,          /* atan(-INF,+INF) */
+                2 => 3. * PI / 4.,      /* atan(+INF,-INF)*/
+                3 | _ => -3. * PI / 4., /* atan(-INF,-INF)*/
+            }
+        } else {
+            match m {
+                0 => 0.,      /* atan(+...,+INF) */
+                1 => -0.,     /* atan(-...,+INF) */
+                2 => PI,      /* atan(+...,-INF) */
+                3 | _ => -PI, /* atan(-...,-INF) */
+            }
+        };
+    }
+    /* |y/x| > 0x1p26 */
+    if (ix + (26 << 23) < iy) || (iy == 0x7f800000) {
+        return if m & 1 != 0 { -PI / 2. } else { PI / 2. };
+    }
+
+    /* z = atan(|y/x|) with correct underflow */
+    let z = if (m & 2 != 0) && (iy + (26 << 23) < ix) {
+        /*|y/x| < 0x1p-26, x < 0 */
+        0.
+    } else {
+        atanf(fabsf(y / x))
+    };
+    match m {
+        0 => z,                /* atan(+,+) */
+        1 => -z,               /* atan(-,+) */
+        2 => PI - (z - PI_LO), /* atan(+,-) */
+        _ => (z - PI_LO) - PI, /* case 3 */ /* atan(-,-) */
+    }
+}
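+
+// Editor's sketch (not in the upstream source): the quadrant dispatch above
+// is exercised directly on the axes and the diagonals.
+#[cfg(test)]
+mod tests {
+    use super::atan2f;
+    use core::f32;
+
+    #[test]
+    fn sanity_check() {
+        assert_eq!(atan2f(0.0, 1.0), 0.0);
+        assert!((atan2f(1.0, 1.0) - f32::consts::FRAC_PI_4).abs() < 1e-6);
+        assert!((atan2f(1.0, -1.0) - 3.0 * f32::consts::FRAC_PI_4).abs() < 1e-6);
+        assert!((atan2f(-1.0, 0.0) + f32::consts::FRAC_PI_2).abs() < 1e-6);
+    }
+}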
+
+
\ No newline at end of file diff --git a/src/libm/math/atanf.rs.html b/src/libm/math/atanf.rs.html new file mode 100644 index 000000000..f8c6b6f26 --- /dev/null +++ b/src/libm/math/atanf.rs.html @@ -0,0 +1,229 @@ +atanf.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/s_atanf.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::fabsf;
+
+const ATAN_HI: [f32; 4] = [
+    4.6364760399e-01, /* atan(0.5)hi 0x3eed6338 */
+    7.8539812565e-01, /* atan(1.0)hi 0x3f490fda */
+    9.8279368877e-01, /* atan(1.5)hi 0x3f7b985e */
+    1.5707962513e+00, /* atan(inf)hi 0x3fc90fda */
+];
+
+const ATAN_LO: [f32; 4] = [
+    5.0121582440e-09, /* atan(0.5)lo 0x31ac3769 */
+    3.7748947079e-08, /* atan(1.0)lo 0x33222168 */
+    3.4473217170e-08, /* atan(1.5)lo 0x33140fb4 */
+    7.5497894159e-08, /* atan(inf)lo 0x33a22168 */
+];
+
+const A_T: [f32; 5] = [
+    3.3333328366e-01,
+    -1.9999158382e-01,
+    1.4253635705e-01,
+    -1.0648017377e-01,
+    6.1687607318e-02,
+];
+
+/// Arctangent (f32)
+///
+/// Computes the inverse tangent (arc tangent) of the input value.
+/// Returns a value in radians, in the range of -pi/2 to pi/2.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn atanf(mut x: f32) -> f32 {
+    let x1p_120 = f32::from_bits(0x03800000); // 0x1p-120 === 2 ^ (-120)
+
+    let z: f32;
+
+    let mut ix = x.to_bits();
+    let sign = (ix >> 31) != 0;
+    ix &= 0x7fffffff;
+
+    if ix >= 0x4c800000 {
+        /* if |x| >= 2**26 */
+        if x.is_nan() {
+            return x;
+        }
+        z = ATAN_HI[3] + x1p_120;
+        return if sign { -z } else { z };
+    }
+    let id = if ix < 0x3ee00000 {
+        /* |x| < 0.4375 */
+        if ix < 0x39800000 {
+            /* |x| < 2**-12 */
+            if ix < 0x00800000 {
+                /* raise underflow for subnormal x */
+                force_eval!(x * x);
+            }
+            return x;
+        }
+        -1
+    } else {
+        x = fabsf(x);
+        if ix < 0x3f980000 {
+            /* |x| < 1.1875 */
+            if ix < 0x3f300000 {
+                /*  7/16 <= |x| < 11/16 */
+                x = (2. * x - 1.) / (2. + x);
+                0
+            } else {
+                /* 11/16 <= |x| < 19/16 */
+                x = (x - 1.) / (x + 1.);
+                1
+            }
+        } else if ix < 0x401c0000 {
+            /* |x| < 2.4375 */
+            x = (x - 1.5) / (1. + 1.5 * x);
+            2
+        } else {
+            /* 2.4375 <= |x| < 2**26 */
+            x = -1. / x;
+            3
+        }
+    };
+    /* end of argument reduction */
+    z = x * x;
+    let w = z * z;
+    /* break sum from i=0 to 10 aT[i]z**(i+1) into odd and even poly */
+    let s1 = z * (A_T[0] + w * (A_T[2] + w * A_T[4]));
+    let s2 = w * (A_T[1] + w * A_T[3]);
+    if id < 0 {
+        return x - x * (s1 + s2);
+    }
+    let id = id as usize;
+    let z = ATAN_HI[id] - ((x * (s1 + s2) - ATAN_LO[id]) - x);
+    if sign {
+        -z
+    } else {
+        z
+    }
+}
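+
+// Editor's sketch (not in the upstream source), in the style of the f64
+// atan tests in this crate.
+#[cfg(test)]
+mod tests {
+    use super::atanf;
+    use core::f32;
+
+    #[test]
+    fn sanity_check() {
+        assert_eq!(atanf(0.0), 0.0);
+        assert!((atanf(1.0) - f32::consts::FRAC_PI_4).abs() < 1e-6);
+        assert!((atanf(f32::INFINITY) - f32::consts::FRAC_PI_2).abs() < 1e-6);
+        assert!(atanf(f32::NAN).is_nan());
+    }
+}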
+
+
\ No newline at end of file diff --git a/src/libm/math/atanh.rs.html b/src/libm/math/atanh.rs.html new file mode 100644 index 000000000..6839bf01b --- /dev/null +++ b/src/libm/math/atanh.rs.html @@ -0,0 +1,75 @@ +atanh.rs.html -- source
+
+use super::log1p;
+
+/* atanh(x) = log((1+x)/(1-x))/2 = log1p(2x/(1-x))/2 ~= x + x^3/3 + o(x^5) */
+/// Inverse hyperbolic tangent (f64)
+///
+/// Calculates the inverse hyperbolic tangent of `x`.
+/// Is defined as `log((1+x)/(1-x))/2 = log1p(2x/(1-x))/2`.
+pub fn atanh(x: f64) -> f64 {
+    let u = x.to_bits();
+    let e = ((u >> 52) as usize) & 0x7ff;
+    let sign = (u >> 63) != 0;
+
+    /* |x| */
+    let mut y = f64::from_bits(u & 0x7fff_ffff_ffff_ffff);
+
+    if e < 0x3ff - 1 {
+        if e < 0x3ff - 32 {
+            /* handle underflow */
+            if e == 0 {
+                force_eval!(y as f32);
+            }
+        } else {
+            /* |x| < 0.5, up to 1.7ulp error */
+            y = 0.5 * log1p(2.0 * y + 2.0 * y * y / (1.0 - y));
+        }
+    } else {
+        /* avoid overflow */
+        y = 0.5 * log1p(2.0 * (y / (1.0 - y)));
+    }
+
+    if sign {
+        -y
+    } else {
+        y
+    }
+}
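+
+// Editor's sketch (not in the upstream source): atanh is finite on (-1, 1),
+// diverges to +-inf at the endpoints, and is NaN outside.
+#[cfg(test)]
+mod tests {
+    use super::atanh;
+    use core::f64;
+
+    #[test]
+    fn sanity_check() {
+        assert_eq!(atanh(0.0), 0.0);
+        // atanh(0.5) = log(3)/2
+        assert!((atanh(0.5) - 0.5493061443340549).abs() < 1e-15);
+        assert_eq!(atanh(1.0), f64::INFINITY);
+        assert!(atanh(1.5).is_nan());
+    }
+}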
+
+
\ No newline at end of file diff --git a/src/libm/math/atanhf.rs.html b/src/libm/math/atanhf.rs.html new file mode 100644 index 000000000..b7516ae46 --- /dev/null +++ b/src/libm/math/atanhf.rs.html @@ -0,0 +1,75 @@ +atanhf.rs.html -- source
+
+use super::log1pf;
+
+/* atanh(x) = log((1+x)/(1-x))/2 = log1p(2x/(1-x))/2 ~= x + x^3/3 + o(x^5) */
+/// Inverse hyperbolic tangent (f32)
+///
+/// Calculates the inverse hyperbolic tangent of `x`.
+/// Is defined as `log((1+x)/(1-x))/2 = log1p(2x/(1-x))/2`.
+pub fn atanhf(mut x: f32) -> f32 {
+    let mut u = x.to_bits();
+    let sign = (u >> 31) != 0;
+
+    /* |x| */
+    u &= 0x7fffffff;
+    x = f32::from_bits(u);
+
+    if u < 0x3f800000 - (1 << 23) {
+        if u < 0x3f800000 - (32 << 23) {
+            /* handle underflow */
+            if u < (1 << 23) {
+                force_eval!((x * x) as f32);
+            }
+        } else {
+            /* |x| < 0.5, up to 1.7ulp error */
+            x = 0.5 * log1pf(2.0 * x + 2.0 * x * x / (1.0 - x));
+        }
+    } else {
+        /* avoid overflow */
+        x = 0.5 * log1pf(2.0 * (x / (1.0 - x)));
+    }
+
+    if sign {
+        -x
+    } else {
+        x
+    }
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/cbrt.rs.html b/src/libm/math/cbrt.rs.html new file mode 100644 index 000000000..a40bcfc47 --- /dev/null +++ b/src/libm/math/cbrt.rs.html @@ -0,0 +1,231 @@ +cbrt.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/s_cbrt.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ * Optimized by Bruce D. Evans.
+ */
+/* cbrt(x)
+ * Return cube root of x
+ */
+
+use core::f64;
+
+const B1: u32 = 715094163; /* B1 = (1023-1023/3-0.03306235651)*2**20 */
+const B2: u32 = 696219795; /* B2 = (1023-1023/3-54/3-0.03306235651)*2**20 */
+
+/* |1/cbrt(x) - p(x)| < 2**-23.5 (~[-7.93e-8, 7.929e-8]). */
+const P0: f64 = 1.87595182427177009643; /* 0x3ffe03e6, 0x0f61e692 */
+const P1: f64 = -1.88497979543377169875; /* 0xbffe28e0, 0x92f02420 */
+const P2: f64 = 1.621429720105354466140; /* 0x3ff9f160, 0x4a49d6c2 */
+const P3: f64 = -0.758397934778766047437; /* 0xbfe844cb, 0xbee751d9 */
+const P4: f64 = 0.145996192886612446982; /* 0x3fc2b000, 0xd4e4edd7 */
+
+/// Cube root (f64)
+///
+/// Computes the cube root of the argument.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn cbrt(x: f64) -> f64 {
+    let x1p54 = f64::from_bits(0x4350000000000000); // 0x1p54 === 2 ^ 54
+
+    let mut ui: u64 = x.to_bits();
+    let mut r: f64;
+    let s: f64;
+    let mut t: f64;
+    let w: f64;
+    let mut hx: u32 = (ui >> 32) as u32 & 0x7fffffff;
+
+    if hx >= 0x7ff00000 {
+        /* cbrt(NaN,INF) is itself */
+        return x + x;
+    }
+
+    /*
+     * Rough cbrt to 5 bits:
+     *    cbrt(2**e*(1+m)) ~= 2**(e/3)*(1+(e%3+m)/3)
+     * where e is integral and >= 0, m is real and in [0, 1), and "/" and
+     * "%" are integer division and modulus with rounding towards minus
+     * infinity.  The RHS is always >= the LHS and has a maximum relative
+     * error of about 1 in 16.  Adding a bias of -0.03306235651 to the
+     * (e%3+m)/3 term reduces the error to about 1 in 32. With the IEEE
+     * floating point representation, for finite positive normal values,
+     * ordinary integer division of the value in bits magically gives
+     * almost exactly the RHS of the above provided we first subtract the
+     * exponent bias (1023 for doubles) and later add it back.  We do the
+     * subtraction virtually to keep e >= 0 so that ordinary integer
+     * division rounds towards minus infinity; this is also efficient.
+     */
+    if hx < 0x00100000 {
+        /* zero or subnormal? */
+        ui = (x * x1p54).to_bits();
+        hx = (ui >> 32) as u32 & 0x7fffffff;
+        if hx == 0 {
+            return x; /* cbrt(0) is itself */
+        }
+        hx = hx / 3 + B2;
+    } else {
+        hx = hx / 3 + B1;
+    }
+    ui &= 1 << 63;
+    ui |= (hx as u64) << 32;
+    t = f64::from_bits(ui);
+
+    /*
+     * New cbrt to 23 bits:
+     *    cbrt(x) = t*cbrt(x/t**3) ~= t*P(t**3/x)
+     * where P(r) is a polynomial of degree 4 that approximates 1/cbrt(r)
+     * to within 2**-23.5 when |r - 1| < 1/10.  The rough approximation
+     * has produced t such that |t/cbrt(x) - 1| ~< 1/32, and cubing this
+     * gives us bounds for r = t**3/x.
+     *
+     * Try to optimize for parallel evaluation as in __tanf.c.
+     */
+    r = (t * t) * (t / x);
+    t = t * ((P0 + r * (P1 + r * P2)) + ((r * r) * r) * (P3 + r * P4));
+
+    /*
+     * Round t away from zero to 23 bits (sloppily except for ensuring that
+     * the result is larger in magnitude than cbrt(x) but not much more than
+     * 2 23-bit ulps larger).  With rounding towards zero, the error bound
+     * would be ~5/6 instead of ~4/6.  With a maximum error of 2 23-bit ulps
+     * in the rounded t, the infinite-precision error in the Newton
+     * approximation barely affects the third digit in the final error
+     * 0.667; the error in the rounded t can be up to about 3 23-bit ulps
+     * before the final error is larger than 0.667 ulps.
+     */
+    ui = t.to_bits();
+    ui = (ui + 0x80000000) & 0xffffffffc0000000;
+    t = f64::from_bits(ui);
+
+    /* one step Newton iteration to 53 bits with error < 0.667 ulps */
+    s = t * t; /* t*t is exact */
+    r = x / s; /* error <= 0.5 ulps; |r| < |t| */
+    w = t + t; /* t+t is exact */
+    r = (r - t) / (w + r); /* r-t is exact; w+r ~= 3*t */
+    t = t + t * r; /* error <= 0.5 + 0.5/3 + epsilon */
+    t
+}
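+
+// Editor's sketch (not in the upstream source): with the documented final
+// error below 0.667 ulp, exact cubes should come back correct to the last
+// bit or so, and the bit trick preserves the sign.
+#[cfg(test)]
+mod tests {
+    use super::cbrt;
+    use core::f64;
+
+    #[test]
+    fn sanity_check() {
+        assert!((cbrt(27.0) - 3.0).abs() < 1e-14);
+        assert!((cbrt(-8.0) + 2.0).abs() < 1e-14);
+        assert_eq!(cbrt(0.0), 0.0);
+        assert!(cbrt(f64::NAN).is_nan());
+    }
+}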
+
+
\ No newline at end of file diff --git a/src/libm/math/cbrtf.rs.html b/src/libm/math/cbrtf.rs.html new file mode 100644 index 000000000..9e828af05 --- /dev/null +++ b/src/libm/math/cbrtf.rs.html @@ -0,0 +1,155 @@ +cbrtf.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/s_cbrtf.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ * Debugged and optimized by Bruce D. Evans.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/* cbrtf(x)
+ * Return cube root of x
+ */
+
+use core::f32;
+
+const B1: u32 = 709958130; /* B1 = (127-127.0/3-0.03306235651)*2**23 */
+const B2: u32 = 642849266; /* B2 = (127-127.0/3-24/3-0.03306235651)*2**23 */
+
+/// Cube root (f32)
+///
+/// Computes the cube root of the argument.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn cbrtf(x: f32) -> f32 {
+    let x1p24 = f32::from_bits(0x4b800000); // 0x1p24f === 2 ^ 24
+
+    let mut r: f64;
+    let mut t: f64;
+    let mut ui: u32 = x.to_bits();
+    let mut hx: u32 = ui & 0x7fffffff;
+
+    if hx >= 0x7f800000 {
+        /* cbrt(NaN,INF) is itself */
+        return x + x;
+    }
+
+    /* rough cbrt to 5 bits */
+    if hx < 0x00800000 {
+        /* zero or subnormal? */
+        if hx == 0 {
+            return x; /* cbrt(+-0) is itself */
+        }
+        ui = (x * x1p24).to_bits();
+        hx = ui & 0x7fffffff;
+        hx = hx / 3 + B2;
+    } else {
+        hx = hx / 3 + B1;
+    }
+    ui &= 0x80000000;
+    ui |= hx;
+
+    /*
+     * First step Newton iteration (solving t*t-x/t == 0) to 16 bits.  In
+     * double precision so that its terms can be arranged for efficiency
+     * without causing overflow or underflow.
+     */
+    t = f32::from_bits(ui) as f64;
+    r = t * t * t;
+    t = t * (x as f64 + x as f64 + r) / (x as f64 + r + r);
+
+    /*
+     * Second step Newton iteration to 47 bits.  In double precision for
+     * efficiency and accuracy.
+     */
+    r = t * t * t;
+    t = t * (x as f64 + x as f64 + r) / (x as f64 + r + r);
+
+    /* rounding to 24 bits is perfect in round-to-nearest mode */
+    t as f32
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/ceil.rs.html b/src/libm/math/ceil.rs.html new file mode 100644 index 000000000..1402f7638 --- /dev/null +++ b/src/libm/math/ceil.rs.html @@ -0,0 +1,105 @@ +ceil.rs.html -- source
+
+use core::f64;
+
+const TOINT: f64 = 1. / f64::EPSILON;
+
+/// Ceil (f64)
+///
+/// Finds the nearest integer greater than or equal to `x`.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn ceil(x: f64) -> f64 {
+    // On wasm32 we know that LLVM's intrinsic will compile to an optimized
+    // `f64.ceil` native instruction, so we can leverage this for both code size
+    // and speed.
+    llvm_intrinsically_optimized! {
+        #[cfg(target_arch = "wasm32")] {
+            return unsafe { ::core::intrinsics::ceilf64(x) }
+        }
+    }
+    let u: u64 = x.to_bits();
+    let e: i64 = (u >> 52 & 0x7ff) as i64;
+    let y: f64;
+
+    if e >= 0x3ff + 52 || x == 0. {
+        return x;
+    }
+    // y = int(x) - x, where int(x) is an integer neighbor of x
+    y = if (u >> 63) != 0 {
+        x - TOINT + TOINT - x
+    } else {
+        x + TOINT - TOINT - x
+    };
+    // special case because of non-nearest rounding modes
+    if e < 0x3ff {
+        force_eval!(y);
+        return if (u >> 63) != 0 { -0. } else { 1. };
+    }
+    if y < 0. {
+        x + y + 1.
+    } else {
+        x + y
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn sanity_check() {
+        assert_eq!(super::ceil(1.1), 2.0);
+        assert_eq!(super::ceil(2.9), 3.0);
+    }
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/ceilf.rs.html b/src/libm/math/ceilf.rs.html new file mode 100644 index 000000000..b0ffe9308 --- /dev/null +++ b/src/libm/math/ceilf.rs.html @@ -0,0 +1,87 @@ +ceilf.rs.html -- source
+
+use core::f32;
+
+/// Ceil (f32)
+///
+/// Finds the nearest integer greater than or equal to `x`.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn ceilf(x: f32) -> f32 {
+    // On wasm32 we know that LLVM's intrinsic will compile to an optimized
+    // `f32.ceil` native instruction, so we can leverage this for both code size
+    // and speed.
+    llvm_intrinsically_optimized! {
+        #[cfg(target_arch = "wasm32")] {
+            return unsafe { ::core::intrinsics::ceilf32(x) }
+        }
+    }
+    let mut ui = x.to_bits();
+    let e = (((ui >> 23) & 0xff).wrapping_sub(0x7f)) as i32;
+
+    if e >= 23 {
+        return x;
+    }
+    if e >= 0 {
+        let m = 0x007fffff >> e;
+        if (ui & m) == 0 {
+            return x;
+        }
+        force_eval!(x + f32::from_bits(0x7b800000));
+        if ui >> 31 == 0 {
+            ui += m;
+        }
+        ui &= !m;
+    } else {
+        force_eval!(x + f32::from_bits(0x7b800000));
+        if ui >> 31 != 0 {
+            return -0.0;
+        } else if ui << 1 != 0 {
+            return 1.0;
+        }
+    }
+    f32::from_bits(ui)
+}
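+
+// Editor's sketch (not in the upstream source): the e < 0 branch above is
+// the subtle one -- anything in (-1, 0) must collapse to -0.0 and anything
+// in (0, 1) to 1.0.
+#[cfg(test)]
+mod tests {
+    use super::ceilf;
+
+    #[test]
+    fn sanity_check() {
+        assert_eq!(ceilf(1.1), 2.0);
+        assert_eq!(ceilf(-1.1), -1.0);
+        assert_eq!(ceilf(0.5), 1.0);
+        assert_eq!(ceilf(-0.5), -0.0);
+    }
+}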
+
+
\ No newline at end of file diff --git a/src/libm/math/copysign.rs.html b/src/libm/math/copysign.rs.html new file mode 100644 index 000000000..09e082e70 --- /dev/null +++ b/src/libm/math/copysign.rs.html @@ -0,0 +1,25 @@ +copysign.rs.html -- source
+
+/// Sign of Y, magnitude of X (f64)
+///
+/// Constructs a number with the magnitude (absolute value) of its
+/// first argument, `x`, and the sign of its second argument, `y`.
+pub fn copysign(x: f64, y: f64) -> f64 {
+    let mut ux = x.to_bits();
+    let uy = y.to_bits();
+    ux &= (!0) >> 1;
+    ux |= uy & (1 << 63);
+    f64::from_bits(ux)
+}
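+
+// Editor's sketch (not in the upstream source): copysign is a pure bit
+// operation, so it also transfers the sign onto zero and NaN.
+#[cfg(test)]
+mod tests {
+    use super::copysign;
+    use core::f64;
+
+    #[test]
+    fn sanity_check() {
+        assert_eq!(copysign(3.0, -1.0), -3.0);
+        assert_eq!(copysign(-3.0, 1.0), 3.0);
+        assert!(copysign(0.0, -1.0).is_sign_negative());
+        assert!(copysign(f64::NAN, -1.0).is_nan());
+    }
+}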
+
+
\ No newline at end of file diff --git a/src/libm/math/copysignf.rs.html b/src/libm/math/copysignf.rs.html new file mode 100644 index 000000000..107ea2bd8 --- /dev/null +++ b/src/libm/math/copysignf.rs.html @@ -0,0 +1,25 @@ +copysignf.rs.html -- source
+
+/// Sign of Y, magnitude of X (f32)
+///
+/// Constructs a number with the magnitude (absolute value) of its
+/// first argument, `x`, and the sign of its second argument, `y`.
+pub fn copysignf(x: f32, y: f32) -> f32 {
+    let mut ux = x.to_bits();
+    let uy = y.to_bits();
+    ux &= 0x7fffffff;
+    ux |= uy & 0x80000000;
+    f32::from_bits(ux)
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/cos.rs.html b/src/libm/math/cos.rs.html new file mode 100644 index 000000000..3a72e3c2e --- /dev/null +++ b/src/libm/math/cos.rs.html @@ -0,0 +1,151 @@ +cos.rs.html -- source
+
+// origin: FreeBSD /usr/src/lib/msun/src/s_cos.c
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+
+use super::{k_cos, k_sin, rem_pio2};
+
+// cos(x)
+// Return cosine function of x.
+//
+// kernel function:
+//      k_sin           ... sine function on [-pi/4,pi/4]
+//      k_cos           ... cosine function on [-pi/4,pi/4]
+//      rem_pio2        ... argument reduction routine
+//
+// Method.
+//      Let S,C and T denote the sin, cos and tan respectively on
+//      [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+//      in [-pi/4 , +pi/4], and let n = k mod 4.
+//      We have
+//
+//          n        sin(x)      cos(x)        tan(x)
+//     ----------------------------------------------------------
+//          0          S           C             T
+//          1          C          -S            -1/T
+//          2         -S          -C             T
+//          3         -C           S            -1/T
+//     ----------------------------------------------------------
+//
+// Special cases:
+//      Let trig be any of sin, cos, or tan.
+//      trig(+-INF)  is NaN, with signals;
+//      trig(NaN)    is that NaN;
+//
+// Accuracy:
+//      TRIG(x) returns trig(x) nearly rounded
+//
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn cos(x: f64) -> f64 {
+    let ix = (f64::to_bits(x) >> 32) as u32 & 0x7fffffff;
+
+    /* |x| ~< pi/4 */
+    if ix <= 0x3fe921fb {
+        if ix < 0x3e46a09e {
+            /* if x < 2**-27 * sqrt(2) */
+            /* raise inexact if x != 0 */
+            if x as i32 == 0 {
+                return 1.0;
+            }
+        }
+        return k_cos(x, 0.0);
+    }
+
+    /* cos(Inf or NaN) is NaN */
+    if ix >= 0x7ff00000 {
+        return x - x;
+    }
+
+    /* argument reduction needed */
+    let (n, y0, y1) = rem_pio2(x);
+    match n & 3 {
+        0 => k_cos(y0, y1),
+        1 => -k_sin(y0, y1, 1),
+        2 => -k_cos(y0, y1),
+        _ => k_sin(y0, y1, 1),
+    }
+}
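+
+// Editor's sketch (not in the upstream source), touching each case of the
+// n & 3 dispatch above plus the special cases.
+#[cfg(test)]
+mod tests {
+    use super::cos;
+    use core::f64;
+
+    #[test]
+    fn sanity_check() {
+        assert_eq!(cos(0.0), 1.0);
+        assert!((cos(f64::consts::PI) + 1.0).abs() < 1e-15);
+        assert!(cos(f64::consts::FRAC_PI_2).abs() < 1e-15);
+        assert!(cos(f64::INFINITY).is_nan());
+    }
+}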
+
+
\ No newline at end of file diff --git a/src/libm/math/cosf.rs.html b/src/libm/math/cosf.rs.html new file mode 100644 index 000000000..133d7bae9 --- /dev/null +++ b/src/libm/math/cosf.rs.html @@ -0,0 +1,171 @@ +cosf.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/s_cosf.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ * Optimized by Bruce D. Evans.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::{k_cosf, k_sinf, rem_pio2f};
+
+use core::f64::consts::FRAC_PI_2;
+
+/* Small multiples of pi/2 rounded to double precision. */
+const C1_PIO2: f64 = 1. * FRAC_PI_2; /* 0x3FF921FB, 0x54442D18 */
+const C2_PIO2: f64 = 2. * FRAC_PI_2; /* 0x400921FB, 0x54442D18 */
+const C3_PIO2: f64 = 3. * FRAC_PI_2; /* 0x4012D97C, 0x7F3321D2 */
+const C4_PIO2: f64 = 4. * FRAC_PI_2; /* 0x401921FB, 0x54442D18 */
+
+#[inline]
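+/// Cosine (f32)
+///
+/// Computes the cosine of the argument (in radians). (Editor's addition: a
+/// doc comment in the style of the other functions here; this one lacked it.)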
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn cosf(x: f32) -> f32 {
+    let x64 = x as f64;
+
+    let x1p120 = f32::from_bits(0x7b800000); // 0x1p120f === 2 ^ 120
+
+    let mut ix = x.to_bits();
+    let sign = (ix >> 31) != 0;
+    ix &= 0x7fffffff;
+
+    if ix <= 0x3f490fda {
+        /* |x| ~<= pi/4 */
+        if ix < 0x39800000 {
+            /* |x| < 2**-12 */
+            /* raise inexact if x != 0 */
+            force_eval!(x + x1p120);
+            return 1.;
+        }
+        return k_cosf(x64);
+    }
+    if ix <= 0x407b53d1 {
+        /* |x| ~<= 5*pi/4 */
+        if ix > 0x4016cbe3 {
+            /* |x|  ~> 3*pi/4 */
+            return -k_cosf(if sign { x64 + C2_PIO2 } else { x64 - C2_PIO2 });
+        } else if sign {
+            return k_sinf(x64 + C1_PIO2);
+        } else {
+            return k_sinf(C1_PIO2 - x64);
+        }
+    }
+    if ix <= 0x40e231d5 {
+        /* |x| ~<= 9*pi/4 */
+        if ix > 0x40afeddf {
+            /* |x| ~> 7*pi/4 */
+            return k_cosf(if sign { x64 + C4_PIO2 } else { x64 - C4_PIO2 });
+        } else if sign {
+            return k_sinf(-x64 - C3_PIO2);
+        } else {
+            return k_sinf(x64 - C3_PIO2);
+        }
+    }
+
+    /* cos(Inf or NaN) is NaN */
+    if ix >= 0x7f800000 {
+        return x - x;
+    }
+
+    /* general argument reduction needed */
+    let (n, y) = rem_pio2f(x);
+    match n & 3 {
+        0 => k_cosf(y),
+        1 => k_sinf(-y),
+        2 => -k_cosf(y),
+        _ => k_sinf(y),
+    }
+}
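+
+// Editor's note: illustrative checks, not in the upstream source. Arguments up
+// to 9*pi/4 use the precomputed C1_PIO2..C4_PIO2 branches above instead of the
+// general rem_pio2f reduction; the reference value is assumed for comparison.
+#[test]
+fn cosf_demo() {
+    assert_eq!(cosf(0.0), 1.0); // tiny-|x| branch returns exactly 1
+    assert!((cosf(3.0) - (-0.9899925_f32)).abs() < 1e-6); // 3*pi/4 < 3 < 5*pi/4
+}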
+
+
\ No newline at end of file diff --git a/src/libm/math/cosh.rs.html b/src/libm/math/cosh.rs.html new file mode 100644 index 000000000..a78092b96 --- /dev/null +++ b/src/libm/math/cosh.rs.html @@ -0,0 +1,81 @@ +cosh.rs.html -- source
+use super::exp;
+use super::expm1;
+use super::k_expo2;
+
+/// Hyperbolic cosine (f64)
+///
+/// Computes the hyperbolic cosine of the argument x.
+/// It is defined as `(exp(x) + exp(-x))/2`.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn cosh(mut x: f64) -> f64 {
+    /* |x| */
+    let mut ix = x.to_bits();
+    ix &= 0x7fffffffffffffff;
+    x = f64::from_bits(ix);
+    let w = ix >> 32;
+
+    /* |x| < log(2) */
+    if w < 0x3fe62e42 {
+        if w < 0x3ff00000 - (26 << 20) {
+            let x1p120 = f64::from_bits(0x4770000000000000);
+            force_eval!(x + x1p120);
+            return 1.;
+        }
+        let t = expm1(x); // exponential minus 1
+        return 1. + t * t / (2. * (1. + t));
+    }
+
+    /* |x| < log(DBL_MAX) */
+    if w < 0x40862e42 {
+        let t = exp(x);
+        /* note: if x>log(0x1p26) then the 1/t is not needed */
+        return 0.5 * (t + 1. / t);
+    }
+
+    /* |x| > log(DBL_MAX) or nan */
+    k_expo2(x)
+}
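+
+// Editor's note: illustrative checks, not part of the upstream source.
+#[test]
+fn cosh_demo() {
+    assert_eq!(cosh(0.0), 1.0); // tiny-|x| branch
+    assert_eq!(cosh(-2.0), cosh(2.0)); // |x| is taken first, so cosh is even
+    assert!(cosh(1000.0).is_infinite()); // |x| > log(DBL_MAX) overflows via k_expo2
+}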
+
+
\ No newline at end of file diff --git a/src/libm/math/coshf.rs.html b/src/libm/math/coshf.rs.html new file mode 100644 index 000000000..d0c53e9c9 --- /dev/null +++ b/src/libm/math/coshf.rs.html @@ -0,0 +1,81 @@ +coshf.rs.html -- source
+use super::expf;
+use super::expm1f;
+use super::k_expo2f;
+
+/// Hyperbolic cosine (f32)
+///
+/// Computes the hyperbolic cosine of the argument x.
+/// It is defined as `(exp(x) + exp(-x))/2`.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn coshf(mut x: f32) -> f32 {
+    let x1p120 = f32::from_bits(0x7b800000); // 0x1p120f === 2 ^ 120
+
+    /* |x| */
+    let mut ix = x.to_bits();
+    ix &= 0x7fffffff;
+    x = f32::from_bits(ix);
+    let w = ix;
+
+    /* |x| < log(2) */
+    if w < 0x3f317217 {
+        if w < (0x3f800000 - (12 << 23)) {
+            force_eval!(x + x1p120);
+            return 1.;
+        }
+        let t = expm1f(x);
+        return 1. + t * t / (2. * (1. + t));
+    }
+
+    /* |x| < log(FLT_MAX) */
+    if w < 0x42b17217 {
+        let t = expf(x);
+        return 0.5 * (t + 1. / t);
+    }
+
+    /* |x| > log(FLT_MAX) or nan */
+    k_expo2f(x)
+}
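+
+// Editor's note: illustrative checks, not part of the upstream source.
+#[test]
+fn coshf_demo() {
+    assert_eq!(coshf(0.0), 1.0); // tiny-|x| branch
+    assert_eq!(coshf(-1.5), coshf(1.5)); // even, since |x| is taken first
+    assert!(coshf(90.0).is_infinite()); // past log(FLT_MAX) ~ 88.72
+}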
+
+
\ No newline at end of file diff --git a/src/libm/math/erf.rs.html b/src/libm/math/erf.rs.html new file mode 100644 index 000000000..fcb10b406 --- /dev/null +++ b/src/libm/math/erf.rs.html @@ -0,0 +1,637 @@ +erf.rs.html -- source
+use super::{exp, fabs, get_high_word, with_set_low_word};
+/* origin: FreeBSD /usr/src/lib/msun/src/s_erf.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/* double erf(double x)
+ * double erfc(double x)
+ *                           x
+ *                    2      |\
+ *     erf(x)  =  ---------  | exp(-t*t)dt
+ *                 sqrt(pi) \|
+ *                           0
+ *
+ *     erfc(x) =  1-erf(x)
+ *  Note that
+ *              erf(-x) = -erf(x)
+ *              erfc(-x) = 2 - erfc(x)
+ *
+ * Method:
+ *      1. For |x| in [0, 0.84375]
+ *          erf(x)  = x + x*R(x^2)
+ *          erfc(x) = 1 - erf(x)           if x in [-.84375,0.25]
+ *                  = 0.5 + ((0.5-x)-x*R)  if x in [0.25,0.84375]
+ *         where R = P/Q where P is an odd poly of degree 8 and
+ *         Q is an odd poly of degree 10.
+ *                      | R - (erf(x)-x)/x | <= 2**-57.90
+ *
+ *
+ *         Remark. The formula is derived by noting
+ *          erf(x) = (2/sqrt(pi))*(x - x^3/3 + x^5/10 - x^7/42 + ....)
+ *         and that
+ *          2/sqrt(pi) = 1.128379167095512573896158903121545171688
+ *         is close to one. The interval is chosen because the fix
+ *         point of erf(x) is near 0.6174 (i.e., erf(x)=x when x is
+ *         near 0.6174), and by some experiment, 0.84375 is chosen to
+ *         guarantee the error is less than one ulp for erf.
+ *
+ *      2. For |x| in [0.84375,1.25], let s = |x| - 1, and
+ *         c = 0.84506291151 rounded to single (24 bits)
+ *              erf(x)  = sign(x) * (c  + P1(s)/Q1(s))
+ *              erfc(x) = (1-c)  - P1(s)/Q1(s) if x > 0
+ *                        1+(c+P1(s)/Q1(s))    if x < 0
+ *              |P1/Q1 - (erf(|x|)-c)| <= 2**-59.06
+ *         Remark: here we use the Taylor series expansion at x=1.
+ *              erf(1+s) = erf(1) + s*Poly(s)
+ *                       = 0.845.. + P1(s)/Q1(s)
+ *         That is, we use rational approximation to approximate
+ *                      erf(1+s) - (c = (single)0.84506291151)
+ *         Note that |P1/Q1|< 0.078 for x in [0.84375,1.25]
+ *         where
+ *              P1(s) = degree 6 poly in s
+ *              Q1(s) = degree 6 poly in s
+ *
+ *      3. For x in [1.25,1/0.35(~2.857143)],
+ *              erfc(x) = (1/x)*exp(-x*x-0.5625+R1/S1)
+ *              erf(x)  = 1 - erfc(x)
+ *         where
+ *              R1(z) = degree 7 poly in z, (z=1/x^2)
+ *              S1(z) = degree 8 poly in z
+ *
+ *      4. For x in [1/0.35,28]
+ *              erfc(x) = (1/x)*exp(-x*x-0.5625+R2/S2) if x > 0
+ *                      = 2.0 - (1/x)*exp(-x*x-0.5625+R2/S2) if -6<x<0
+ *                      = 2.0 - tiny            (if x <= -6)
+ *              erf(x)  = sign(x)*(1.0 - erfc(x)) if x < 6, else
+ *              erf(x)  = sign(x)*(1.0 - tiny)
+ *         where
+ *              R2(z) = degree 6 poly in z, (z=1/x^2)
+ *              S2(z) = degree 7 poly in z
+ *
+ *      Note1:
+ *         To compute exp(-x*x-0.5625+R/S), let s be a single
+ *         precision number and s := x; then
+ *              -x*x = -s*s + (s-x)*(s+x)
+ *              exp(-x*x-0.5625+R/S) =
+ *                      exp(-s*s-0.5625)*exp((s-x)*(s+x)+R/S);
+ *      Note2:
+ *         Here 3 and 4 make use of the asymptotic series
+ *                        exp(-x*x)
+ *              erfc(x) ~ ---------- * ( 1 + Poly(1/x^2) )
+ *                        x*sqrt(pi)
+ *         We use rational approximation to approximate
+ *              g(s)=f(1/x^2) = log(erfc(x)*x) - x*x + 0.5625
+ *         Here is the error bound for R1/S1 and R2/S2
+ *              |R1/S1 - f(x)|  < 2**(-62.57)
+ *              |R2/S2 - f(x)|  < 2**(-61.52)
+ *
+ *      5. For inf > x >= 28
+ *              erf(x)  = sign(x) *(1 - tiny)  (raise inexact)
+ *              erfc(x) = tiny*tiny (raise underflow) if x > 0
+ *                      = 2 - tiny if x<0
+ *
+ *      6. Special case:
+ *              erf(0)  = 0, erf(inf)  = 1, erf(-inf) = -1,
+ *              erfc(0) = 1, erfc(inf) = 0, erfc(-inf) = 2,
+ *              erfc/erf(NaN) is NaN
+ */
+
+const ERX: f64 = 8.45062911510467529297e-01; /* 0x3FEB0AC1, 0x60000000 */
+/*
+ * Coefficients for approximation to  erf on [0,0.84375]
+ */
+const EFX8: f64 = 1.02703333676410069053e+00; /* 0x3FF06EBA, 0x8214DB69 */
+const PP0: f64 = 1.28379167095512558561e-01; /* 0x3FC06EBA, 0x8214DB68 */
+const PP1: f64 = -3.25042107247001499370e-01; /* 0xBFD4CD7D, 0x691CB913 */
+const PP2: f64 = -2.84817495755985104766e-02; /* 0xBF9D2A51, 0xDBD7194F */
+const PP3: f64 = -5.77027029648944159157e-03; /* 0xBF77A291, 0x236668E4 */
+const PP4: f64 = -2.37630166566501626084e-05; /* 0xBEF8EAD6, 0x120016AC */
+const QQ1: f64 = 3.97917223959155352819e-01; /* 0x3FD97779, 0xCDDADC09 */
+const QQ2: f64 = 6.50222499887672944485e-02; /* 0x3FB0A54C, 0x5536CEBA */
+const QQ3: f64 = 5.08130628187576562776e-03; /* 0x3F74D022, 0xC4D36B0F */
+const QQ4: f64 = 1.32494738004321644526e-04; /* 0x3F215DC9, 0x221C1A10 */
+const QQ5: f64 = -3.96022827877536812320e-06; /* 0xBED09C43, 0x42A26120 */
+/*
+ * Coefficients for approximation to  erf  in [0.84375,1.25]
+ */
+const PA0: f64 = -2.36211856075265944077e-03; /* 0xBF6359B8, 0xBEF77538 */
+const PA1: f64 = 4.14856118683748331666e-01; /* 0x3FDA8D00, 0xAD92B34D */
+const PA2: f64 = -3.72207876035701323847e-01; /* 0xBFD7D240, 0xFBB8C3F1 */
+const PA3: f64 = 3.18346619901161753674e-01; /* 0x3FD45FCA, 0x805120E4 */
+const PA4: f64 = -1.10894694282396677476e-01; /* 0xBFBC6398, 0x3D3E28EC */
+const PA5: f64 = 3.54783043256182359371e-02; /* 0x3FA22A36, 0x599795EB */
+const PA6: f64 = -2.16637559486879084300e-03; /* 0xBF61BF38, 0x0A96073F */
+const QA1: f64 = 1.06420880400844228286e-01; /* 0x3FBB3E66, 0x18EEE323 */
+const QA2: f64 = 5.40397917702171048937e-01; /* 0x3FE14AF0, 0x92EB6F33 */
+const QA3: f64 = 7.18286544141962662868e-02; /* 0x3FB2635C, 0xD99FE9A7 */
+const QA4: f64 = 1.26171219808761642112e-01; /* 0x3FC02660, 0xE763351F */
+const QA5: f64 = 1.36370839120290507362e-02; /* 0x3F8BEDC2, 0x6B51DD1C */
+const QA6: f64 = 1.19844998467991074170e-02; /* 0x3F888B54, 0x5735151D */
+/*
+ * Coefficients for approximation to  erfc in [1.25,1/0.35]
+ */
+const RA0: f64 = -9.86494403484714822705e-03; /* 0xBF843412, 0x600D6435 */
+const RA1: f64 = -6.93858572707181764372e-01; /* 0xBFE63416, 0xE4BA7360 */
+const RA2: f64 = -1.05586262253232909814e+01; /* 0xC0251E04, 0x41B0E726 */
+const RA3: f64 = -6.23753324503260060396e+01; /* 0xC04F300A, 0xE4CBA38D */
+const RA4: f64 = -1.62396669462573470355e+02; /* 0xC0644CB1, 0x84282266 */
+const RA5: f64 = -1.84605092906711035994e+02; /* 0xC067135C, 0xEBCCABB2 */
+const RA6: f64 = -8.12874355063065934246e+01; /* 0xC0545265, 0x57E4D2F2 */
+const RA7: f64 = -9.81432934416914548592e+00; /* 0xC023A0EF, 0xC69AC25C */
+const SA1: f64 = 1.96512716674392571292e+01; /* 0x4033A6B9, 0xBD707687 */
+const SA2: f64 = 1.37657754143519042600e+02; /* 0x4061350C, 0x526AE721 */
+const SA3: f64 = 4.34565877475229228821e+02; /* 0x407B290D, 0xD58A1A71 */
+const SA4: f64 = 6.45387271733267880336e+02; /* 0x40842B19, 0x21EC2868 */
+const SA5: f64 = 4.29008140027567833386e+02; /* 0x407AD021, 0x57700314 */
+const SA6: f64 = 1.08635005541779435134e+02; /* 0x405B28A3, 0xEE48AE2C */
+const SA7: f64 = 6.57024977031928170135e+00; /* 0x401A47EF, 0x8E484A93 */
+const SA8: f64 = -6.04244152148580987438e-02; /* 0xBFAEEFF2, 0xEE749A62 */
+/*
+ * Coefficients for approximation to  erfc in [1/.35,28]
+ */
+const RB0: f64 = -9.86494292470009928597e-03; /* 0xBF843412, 0x39E86F4A */
+const RB1: f64 = -7.99283237680523006574e-01; /* 0xBFE993BA, 0x70C285DE */
+const RB2: f64 = -1.77579549177547519889e+01; /* 0xC031C209, 0x555F995A */
+const RB3: f64 = -1.60636384855821916062e+02; /* 0xC064145D, 0x43C5ED98 */
+const RB4: f64 = -6.37566443368389627722e+02; /* 0xC083EC88, 0x1375F228 */
+const RB5: f64 = -1.02509513161107724954e+03; /* 0xC0900461, 0x6A2E5992 */
+const RB6: f64 = -4.83519191608651397019e+02; /* 0xC07E384E, 0x9BDC383F */
+const SB1: f64 = 3.03380607434824582924e+01; /* 0x403E568B, 0x261D5190 */
+const SB2: f64 = 3.25792512996573918826e+02; /* 0x40745CAE, 0x221B9F0A */
+const SB3: f64 = 1.53672958608443695994e+03; /* 0x409802EB, 0x189D5118 */
+const SB4: f64 = 3.19985821950859553908e+03; /* 0x40A8FFB7, 0x688C246A */
+const SB5: f64 = 2.55305040643316442583e+03; /* 0x40A3F219, 0xCEDF3BE6 */
+const SB6: f64 = 4.74528541206955367215e+02; /* 0x407DA874, 0xE79FE763 */
+const SB7: f64 = -2.24409524465858183362e+01; /* 0xC03670E2, 0x42712D62 */
+
+fn erfc1(x: f64) -> f64 {
+    let s: f64;
+    let p: f64;
+    let q: f64;
+
+    s = fabs(x) - 1.0;
+    p = PA0 + s * (PA1 + s * (PA2 + s * (PA3 + s * (PA4 + s * (PA5 + s * PA6)))));
+    q = 1.0 + s * (QA1 + s * (QA2 + s * (QA3 + s * (QA4 + s * (QA5 + s * QA6)))));
+
+    1.0 - ERX - p / q
+}
+
+fn erfc2(ix: u32, mut x: f64) -> f64 {
+    let s: f64;
+    let r: f64;
+    let big_s: f64;
+    let z: f64;
+
+    if ix < 0x3ff40000 {
+        /* |x| < 1.25 */
+        return erfc1(x);
+    }
+
+    x = fabs(x);
+    s = 1.0 / (x * x);
+    if ix < 0x4006db6d {
+        /* |x| < 1/.35 ~ 2.85714 */
+        r = RA0 + s * (RA1 + s * (RA2 + s * (RA3 + s * (RA4 + s * (RA5 + s * (RA6 + s * RA7))))));
+        big_s = 1.0
+            + s * (SA1
+                + s * (SA2 + s * (SA3 + s * (SA4 + s * (SA5 + s * (SA6 + s * (SA7 + s * SA8)))))));
+    } else {
+        /* |x| > 1/.35 */
+        r = RB0 + s * (RB1 + s * (RB2 + s * (RB3 + s * (RB4 + s * (RB5 + s * RB6)))));
+        big_s =
+            1.0 + s * (SB1 + s * (SB2 + s * (SB3 + s * (SB4 + s * (SB5 + s * (SB6 + s * SB7))))));
+    }
+    z = with_set_low_word(x, 0);
+
+    exp(-z * z - 0.5625) * exp((z - x) * (z + x) + r / big_s) / x
+}
+
+/// Error function (f64)
+///
+/// Calculates an approximation to the “error function”: the probability
+/// that a normally distributed observation will fall within `x * sqrt(2)`
+/// standard deviations of the mean.
+pub fn erf(x: f64) -> f64 {
+    let r: f64;
+    let s: f64;
+    let z: f64;
+    let y: f64;
+    let mut ix: u32;
+    let sign: usize;
+
+    ix = get_high_word(x);
+    sign = (ix >> 31) as usize;
+    ix &= 0x7fffffff;
+    if ix >= 0x7ff00000 {
+        /* erf(nan)=nan, erf(+-inf)=+-1 */
+        return 1.0 - 2.0 * (sign as f64) + 1.0 / x;
+    }
+    if ix < 0x3feb0000 {
+        /* |x| < 0.84375 */
+        if ix < 0x3e300000 {
+            /* |x| < 2**-28 */
+            /* avoid underflow */
+            return 0.125 * (8.0 * x + EFX8 * x);
+        }
+        z = x * x;
+        r = PP0 + z * (PP1 + z * (PP2 + z * (PP3 + z * PP4)));
+        s = 1.0 + z * (QQ1 + z * (QQ2 + z * (QQ3 + z * (QQ4 + z * QQ5))));
+        y = r / s;
+        return x + x * y;
+    }
+    if ix < 0x40180000 {
+        /* 0.84375 <= |x| < 6 */
+        y = 1.0 - erfc2(ix, x);
+    } else {
+        let x1p_1022 = f64::from_bits(0x0010000000000000);
+        y = 1.0 - x1p_1022;
+    }
+
+    if sign != 0 {
+        -y
+    } else {
+        y
+    }
+}
+
+/// Complementary error function (f64)
+///
+/// Calculates the complementary probability `1 - erf(x)`. It is computed
+/// directly, so that you can use it to avoid the loss of precision that
+/// would result from subtracting large probabilities (for large `x`) from 1.
+pub fn erfc(x: f64) -> f64 {
+    let r: f64;
+    let s: f64;
+    let z: f64;
+    let y: f64;
+    let mut ix: u32;
+    let sign: usize;
+
+    ix = get_high_word(x);
+    sign = (ix >> 31) as usize;
+    ix &= 0x7fffffff;
+    if ix >= 0x7ff00000 {
+        /* erfc(nan)=nan, erfc(+-inf)=0,2 */
+        return 2.0 * (sign as f64) + 1.0 / x;
+    }
+    if ix < 0x3feb0000 {
+        /* |x| < 0.84375 */
+        if ix < 0x3c700000 {
+            /* |x| < 2**-56 */
+            return 1.0 - x;
+        }
+        z = x * x;
+        r = PP0 + z * (PP1 + z * (PP2 + z * (PP3 + z * PP4)));
+        s = 1.0 + z * (QQ1 + z * (QQ2 + z * (QQ3 + z * (QQ4 + z * QQ5))));
+        y = r / s;
+        if sign != 0 || ix < 0x3fd00000 {
+            /* x < 1/4 */
+            return 1.0 - (x + x * y);
+        }
+        return 0.5 - (x - 0.5 + x * y);
+    }
+    if ix < 0x403c0000 {
+        /* 0.84375 <= |x| < 28 */
+        if sign != 0 {
+            return 2.0 - erfc2(ix, x);
+        } else {
+            return erfc2(ix, x);
+        }
+    }
+
+    let x1p_1022 = f64::from_bits(0x0010000000000000);
+    if sign != 0 {
+        2.0 - x1p_1022
+    } else {
+        x1p_1022 * x1p_1022
+    }
+}
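+
+// Editor's note: an illustrative check, not part of the upstream source,
+// showing why erfc is computed directly rather than as 1 - erf(x): for large
+// x, erf(x) rounds to 1.0 in f64 and the subtraction loses everything, while
+// the direct erfc(x) is still a meaningful nonzero value (~2e-45 at x = 10).
+#[test]
+fn erfc_precision_demo() {
+    assert_eq!(1.0 - erf(10.0), 0.0); // catastrophic cancellation
+    assert!(erfc(10.0) > 0.0); // direct computation keeps the tail
+}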
+
+
\ No newline at end of file diff --git a/src/libm/math/erff.rs.html b/src/libm/math/erff.rs.html new file mode 100644 index 000000000..2299cf9f7 --- /dev/null +++ b/src/libm/math/erff.rs.html @@ -0,0 +1,461 @@ +erff.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/s_erff.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::{expf, fabsf};
+
+const ERX: f32 = 8.4506291151e-01; /* 0x3f58560b */
+/*
+ * Coefficients for approximation to  erf on [0,0.84375]
+ */
+const EFX8: f32 = 1.0270333290e+00; /* 0x3f8375d4 */
+const PP0: f32 = 1.2837916613e-01; /* 0x3e0375d4 */
+const PP1: f32 = -3.2504209876e-01; /* 0xbea66beb */
+const PP2: f32 = -2.8481749818e-02; /* 0xbce9528f */
+const PP3: f32 = -5.7702702470e-03; /* 0xbbbd1489 */
+const PP4: f32 = -2.3763017452e-05; /* 0xb7c756b1 */
+const QQ1: f32 = 3.9791721106e-01; /* 0x3ecbbbce */
+const QQ2: f32 = 6.5022252500e-02; /* 0x3d852a63 */
+const QQ3: f32 = 5.0813062117e-03; /* 0x3ba68116 */
+const QQ4: f32 = 1.3249473704e-04; /* 0x390aee49 */
+const QQ5: f32 = -3.9602282413e-06; /* 0xb684e21a */
+/*
+ * Coefficients for approximation to  erf  in [0.84375,1.25]
+ */
+const PA0: f32 = -2.3621185683e-03; /* 0xbb1acdc6 */
+const PA1: f32 = 4.1485610604e-01; /* 0x3ed46805 */
+const PA2: f32 = -3.7220788002e-01; /* 0xbebe9208 */
+const PA3: f32 = 3.1834661961e-01; /* 0x3ea2fe54 */
+const PA4: f32 = -1.1089469492e-01; /* 0xbde31cc2 */
+const PA5: f32 = 3.5478305072e-02; /* 0x3d1151b3 */
+const PA6: f32 = -2.1663755178e-03; /* 0xbb0df9c0 */
+const QA1: f32 = 1.0642088205e-01; /* 0x3dd9f331 */
+const QA2: f32 = 5.4039794207e-01; /* 0x3f0a5785 */
+const QA3: f32 = 7.1828655899e-02; /* 0x3d931ae7 */
+const QA4: f32 = 1.2617121637e-01; /* 0x3e013307 */
+const QA5: f32 = 1.3637083583e-02; /* 0x3c5f6e13 */
+const QA6: f32 = 1.1984500103e-02; /* 0x3c445aa3 */
+/*
+ * Coefficients for approximation to  erfc in [1.25,1/0.35]
+ */
+const RA0: f32 = -9.8649440333e-03; /* 0xbc21a093 */
+const RA1: f32 = -6.9385856390e-01; /* 0xbf31a0b7 */
+const RA2: f32 = -1.0558626175e+01; /* 0xc128f022 */
+const RA3: f32 = -6.2375331879e+01; /* 0xc2798057 */
+const RA4: f32 = -1.6239666748e+02; /* 0xc322658c */
+const RA5: f32 = -1.8460508728e+02; /* 0xc3389ae7 */
+const RA6: f32 = -8.1287437439e+01; /* 0xc2a2932b */
+const RA7: f32 = -9.8143291473e+00; /* 0xc11d077e */
+const SA1: f32 = 1.9651271820e+01; /* 0x419d35ce */
+const SA2: f32 = 1.3765776062e+02; /* 0x4309a863 */
+const SA3: f32 = 4.3456588745e+02; /* 0x43d9486f */
+const SA4: f32 = 6.4538726807e+02; /* 0x442158c9 */
+const SA5: f32 = 4.2900814819e+02; /* 0x43d6810b */
+const SA6: f32 = 1.0863500214e+02; /* 0x42d9451f */
+const SA7: f32 = 6.5702495575e+00; /* 0x40d23f7c */
+const SA8: f32 = -6.0424413532e-02; /* 0xbd777f97 */
+/*
+ * Coefficients for approximation to  erfc in [1/.35,28]
+ */
+const RB0: f32 = -9.8649431020e-03; /* 0xbc21a092 */
+const RB1: f32 = -7.9928326607e-01; /* 0xbf4c9dd4 */
+const RB2: f32 = -1.7757955551e+01; /* 0xc18e104b */
+const RB3: f32 = -1.6063638306e+02; /* 0xc320a2ea */
+const RB4: f32 = -6.3756646729e+02; /* 0xc41f6441 */
+const RB5: f32 = -1.0250950928e+03; /* 0xc480230b */
+const RB6: f32 = -4.8351919556e+02; /* 0xc3f1c275 */
+const SB1: f32 = 3.0338060379e+01; /* 0x41f2b459 */
+const SB2: f32 = 3.2579251099e+02; /* 0x43a2e571 */
+const SB3: f32 = 1.5367296143e+03; /* 0x44c01759 */
+const SB4: f32 = 3.1998581543e+03; /* 0x4547fdbb */
+const SB5: f32 = 2.5530502930e+03; /* 0x451f90ce */
+const SB6: f32 = 4.7452853394e+02; /* 0x43ed43a7 */
+const SB7: f32 = -2.2440952301e+01; /* 0xc1b38712 */
+
+fn erfc1(x: f32) -> f32 {
+    let s: f32;
+    let p: f32;
+    let q: f32;
+
+    s = fabsf(x) - 1.0;
+    p = PA0 + s * (PA1 + s * (PA2 + s * (PA3 + s * (PA4 + s * (PA5 + s * PA6)))));
+    q = 1.0 + s * (QA1 + s * (QA2 + s * (QA3 + s * (QA4 + s * (QA5 + s * QA6)))));
+    return 1.0 - ERX - p / q;
+}
+
+fn erfc2(mut ix: u32, mut x: f32) -> f32 {
+    let s: f32;
+    let r: f32;
+    let big_s: f32;
+    let z: f32;
+
+    if ix < 0x3fa00000 {
+        /* |x| < 1.25 */
+        return erfc1(x);
+    }
+
+    x = fabsf(x);
+    s = 1.0 / (x * x);
+    if ix < 0x4036db6d {
+        /* |x| < 1/0.35 */
+        r = RA0 + s * (RA1 + s * (RA2 + s * (RA3 + s * (RA4 + s * (RA5 + s * (RA6 + s * RA7))))));
+        big_s = 1.0
+            + s * (SA1
+                + s * (SA2 + s * (SA3 + s * (SA4 + s * (SA5 + s * (SA6 + s * (SA7 + s * SA8)))))));
+    } else {
+        /* |x| >= 1/0.35 */
+        r = RB0 + s * (RB1 + s * (RB2 + s * (RB3 + s * (RB4 + s * (RB5 + s * RB6)))));
+        big_s =
+            1.0 + s * (SB1 + s * (SB2 + s * (SB3 + s * (SB4 + s * (SB5 + s * (SB6 + s * SB7))))));
+    }
+    ix = x.to_bits();
+    z = f32::from_bits(ix & 0xffffe000);
+
+    expf(-z * z - 0.5625) * expf((z - x) * (z + x) + r / big_s) / x
+}
+
+/// Error function (f32)
+///
+/// Calculates an approximation to the “error function”: the probability
+/// that a normally distributed observation will fall within `x * sqrt(2)`
+/// standard deviations of the mean.
+pub fn erff(x: f32) -> f32 {
+    let r: f32;
+    let s: f32;
+    let z: f32;
+    let y: f32;
+    let mut ix: u32;
+    let sign: usize;
+
+    ix = x.to_bits();
+    sign = (ix >> 31) as usize;
+    ix &= 0x7fffffff;
+    if ix >= 0x7f800000 {
+        /* erf(nan)=nan, erf(+-inf)=+-1 */
+        return 1.0 - 2.0 * (sign as f32) + 1.0 / x;
+    }
+    if ix < 0x3f580000 {
+        /* |x| < 0.84375 */
+        if ix < 0x31800000 {
+            /* |x| < 2**-28 */
+            /* avoid underflow */
+            return 0.125 * (8.0 * x + EFX8 * x);
+        }
+        z = x * x;
+        r = PP0 + z * (PP1 + z * (PP2 + z * (PP3 + z * PP4)));
+        s = 1.0 + z * (QQ1 + z * (QQ2 + z * (QQ3 + z * (QQ4 + z * QQ5))));
+        y = r / s;
+        return x + x * y;
+    }
+    if ix < 0x40c00000 {
+        /* |x| < 6 */
+        y = 1.0 - erfc2(ix, x);
+    } else {
+        let x1p_120 = f32::from_bits(0x03800000);
+        y = 1.0 - x1p_120;
+    }
+
+    if sign != 0 {
+        -y
+    } else {
+        y
+    }
+}
+
+/// Complementary error function (f32)
+///
+/// Calculates the complementary probability `1 - erf(x)`. It is computed
+/// directly, so that you can use it to avoid the loss of precision that
+/// would result from subtracting large probabilities (for large `x`) from 1.
+pub fn erfcf(x: f32) -> f32 {
+    let r: f32;
+    let s: f32;
+    let z: f32;
+    let y: f32;
+    let mut ix: u32;
+    let sign: usize;
+
+    ix = x.to_bits();
+    sign = (ix >> 31) as usize;
+    ix &= 0x7fffffff;
+    if ix >= 0x7f800000 {
+        /* erfc(nan)=nan, erfc(+-inf)=0,2 */
+        return 2.0 * (sign as f32) + 1.0 / x;
+    }
+
+    if ix < 0x3f580000 {
+        /* |x| < 0.84375 */
+        if ix < 0x23800000 {
+            /* |x| < 2**-56 */
+            return 1.0 - x;
+        }
+        z = x * x;
+        r = PP0 + z * (PP1 + z * (PP2 + z * (PP3 + z * PP4)));
+        s = 1.0 + z * (QQ1 + z * (QQ2 + z * (QQ3 + z * (QQ4 + z * QQ5))));
+        y = r / s;
+        if sign != 0 || ix < 0x3e800000 {
+            /* x < 1/4 */
+            return 1.0 - (x + x * y);
+        }
+        return 0.5 - (x - 0.5 + x * y);
+    }
+    if ix < 0x41e00000 {
+        /* |x| < 28 */
+        if sign != 0 {
+            return 2.0 - erfc2(ix, x);
+        } else {
+            return erfc2(ix, x);
+        }
+    }
+
+    let x1p_120 = f32::from_bits(0x03800000);
+    if sign != 0 {
+        2.0 - x1p_120
+    } else {
+        x1p_120 * x1p_120
+    }
+}
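+
+// Editor's note: an illustrative check, not part of the upstream source. In
+// f32 the saturation happens even sooner: erff sticks at exactly 1.0 from
+// |x| >= 6, while erfcf still resolves the tail (erfc(6) ~ 2.15e-17).
+#[test]
+fn erfcf_precision_demo() {
+    assert_eq!(erff(6.0), 1.0);
+    assert!(erfcf(6.0) > 0.0);
+}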
+
+
\ No newline at end of file diff --git a/src/libm/math/exp.rs.html b/src/libm/math/exp.rs.html new file mode 100644 index 000000000..347e201c7 --- /dev/null +++ b/src/libm/math/exp.rs.html @@ -0,0 +1,313 @@ +exp.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/e_exp.c */
+/*
+ * ====================================================
+ * Copyright (C) 2004 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/* exp(x)
+ * Returns the exponential of x.
+ *
+ * Method
+ *   1. Argument reduction:
+ *      Reduce x to an r so that |r| <= 0.5*ln2 ~ 0.34658.
+ *      Given x, find r and integer k such that
+ *
+ *               x = k*ln2 + r,  |r| <= 0.5*ln2.
+ *
+ *      Here r will be represented as r = hi-lo for better
+ *      accuracy.
+ *
+ *   2. Approximation of exp(r) by a special rational function on
+ *      the interval [0,0.34658]:
+ *      Write
+ *          R(r**2) = r*(exp(r)+1)/(exp(r)-1) = 2 + r*r/6 - r**4/360 + ...
+ *      We use a special Remez algorithm on [0,0.34658] to generate
+ *      a polynomial of degree 5 to approximate R. The maximum error
+ *      of this polynomial approximation is bounded by 2**-59. In
+ *      other words,
+ *          R(z) ~ 2.0 + P1*z + P2*z**2 + P3*z**3 + P4*z**4 + P5*z**5
+ *      (where z=r*r, and the values of P1 to P5 are listed below)
+ *      and
+ *          | 2.0 + P1*z + ... + P5*z**5  -  R(z) | <= 2**-59
+ *      The computation of exp(r) thus becomes
+ *                              2*r
+ *              exp(r) = 1 + ----------
+ *                            R(r) - r
+ *                                 r*c(r)
+ *                     = 1 + r + ----------- (for better accuracy)
+ *                                2 - c(r)
+ *      where
+ *              c(r) = r - (P1*r**2 + P2*r**4 + ... + P5*r**10).
+ *
+ *   3. Scale back to obtain exp(x):
+ *      From step 1, we have
+ *         exp(x) = 2^k * exp(r)
+ *
+ * Special cases:
+ *      exp(INF) is INF, exp(NaN) is NaN;
+ *      exp(-INF) is 0, and
+ *      for finite argument, only exp(0)=1 is exact.
+ *
+ * Accuracy:
+ *      according to an error analysis, the error is always less than
+ *      1 ulp (unit in the last place).
+ *
+ * Misc. info.
+ *      For IEEE double
+ *          if x >  709.782712893383973096 then exp(x) overflows
+ *          if x < -745.133219101941108420 then exp(x) underflows
+ */
+
+use super::scalbn;
+
+const HALF: [f64; 2] = [0.5, -0.5];
+const LN2HI: f64 = 6.93147180369123816490e-01; /* 0x3fe62e42, 0xfee00000 */
+const LN2LO: f64 = 1.90821492927058770002e-10; /* 0x3dea39ef, 0x35793c76 */
+const INVLN2: f64 = 1.44269504088896338700e+00; /* 0x3ff71547, 0x652b82fe */
+const P1: f64 = 1.66666666666666019037e-01; /* 0x3FC55555, 0x5555553E */
+const P2: f64 = -2.77777777770155933842e-03; /* 0xBF66C16C, 0x16BEBD93 */
+const P3: f64 = 6.61375632143793436117e-05; /* 0x3F11566A, 0xAF25DE2C */
+const P4: f64 = -1.65339022054652515390e-06; /* 0xBEBBBD41, 0xC5D26BF1 */
+const P5: f64 = 4.13813679705723846039e-08; /* 0x3E663769, 0x72BEA4D0 */
+
+/// Exponential, base *e* (f64)
+///
+/// Calculate the exponential of `x`, that is, *e* raised to the power `x`
+/// (where *e* is the base of the natural system of logarithms, approximately 2.71828).
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn exp(mut x: f64) -> f64 {
+    let x1p1023 = f64::from_bits(0x7fe0000000000000); // 0x1p1023 === 2 ^ 1023
+    let x1p_149 = f64::from_bits(0x36a0000000000000); // 0x1p-149 === 2 ^ -149
+
+    let hi: f64;
+    let lo: f64;
+    let c: f64;
+    let xx: f64;
+    let y: f64;
+    let k: i32;
+    let sign: i32;
+    let mut hx: u32;
+
+    hx = (x.to_bits() >> 32) as u32;
+    sign = (hx >> 31) as i32;
+    hx &= 0x7fffffff; /* high word of |x| */
+
+    /* special cases */
+    if hx >= 0x4086232b {
+        /* if |x| >= 708.39... */
+        if x.is_nan() {
+            return x;
+        }
+        if x > 709.782712893383973096 {
+            /* overflow if x!=inf */
+            x *= x1p1023;
+            return x;
+        }
+        if x < -708.39641853226410622 {
+            /* underflow if x!=-inf */
+            force_eval!((-x1p_149 / x) as f32);
+            if x < -745.13321910194110842 {
+                return 0.;
+            }
+        }
+    }
+
+    /* argument reduction */
+    if hx > 0x3fd62e42 {
+        /* if |x| > 0.5 ln2 */
+        if hx >= 0x3ff0a2b2 {
+            /* if |x| >= 1.5 ln2 */
+            k = (INVLN2 * x + HALF[sign as usize]) as i32;
+        } else {
+            k = 1 - sign - sign;
+        }
+        hi = x - k as f64 * LN2HI; /* k*ln2hi is exact here */
+        lo = k as f64 * LN2LO;
+        x = hi - lo;
+    } else if hx > 0x3e300000 {
+        /* if |x| > 2**-28 */
+        k = 0;
+        hi = x;
+        lo = 0.;
+    } else {
+        /* inexact if x!=0 */
+        force_eval!(x1p1023 + x);
+        return 1. + x;
+    }
+
+    /* x is now in primary range */
+    xx = x * x;
+    c = x - xx * (P1 + xx * (P2 + xx * (P3 + xx * (P4 + xx * P5))));
+    y = 1. + (x * c / (2. - c) - lo + hi);
+    if k == 0 {
+        y
+    } else {
+        scalbn(y, k)
+    }
+}
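+
+// Editor's note: illustrative checks of the overflow/underflow cutoffs
+// documented above (x > 709.78... overflows, x < -745.13... underflows to
+// zero); not part of the upstream source.
+#[test]
+fn exp_range_demo() {
+    assert!(exp(710.0).is_infinite());
+    assert_eq!(exp(-746.0), 0.0);
+    assert_eq!(exp(0.0), 1.0); // the only exact case for finite arguments
+}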
+
+
\ No newline at end of file diff --git a/src/libm/math/exp10.rs.html b/src/libm/math/exp10.rs.html new file mode 100644 index 000000000..05a2aee61 --- /dev/null +++ b/src/libm/math/exp10.rs.html @@ -0,0 +1,45 @@ +exp10.rs.html -- source
+use super::{exp2, modf, pow};
+
+const LN10: f64 = 3.32192809488736234787031942948939; /* log2(10), despite the name; used with exp2 below */
+const P10: &[f64] = &[
+    1e-15, 1e-14, 1e-13, 1e-12, 1e-11, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1,
+    1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15,
+];
+
+pub fn exp10(x: f64) -> f64 {
+    let (mut y, n) = modf(x);
+    let u: u64 = n.to_bits();
+    /* fabs(n) < 16 without raising invalid on nan */
+    if (u >> 52 & 0x7ff) < 0x3ff + 4 {
+        if y == 0.0 {
+            return P10[((n as isize) + 15) as usize];
+        }
+        y = exp2(LN10 * y);
+        return y * P10[((n as isize) + 15) as usize];
+    }
+    return pow(10.0, x);
+}
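+
+// Editor's note: illustrative checks, not part of the upstream source. Since
+// the constant named LN10 above is actually log2(10), exp2(log2(10) * y) gives
+// 10^y; integer arguments in [-15, 15] come straight from the exact P10 table.
+#[test]
+fn exp10_demo() {
+    assert_eq!(exp10(2.0), 100.0); // exact table lookup
+    assert_eq!(exp10(-3.0), 1e-3); // exact table lookup
+}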
+
+
\ No newline at end of file diff --git a/src/libm/math/exp10f.rs.html b/src/libm/math/exp10f.rs.html new file mode 100644 index 000000000..96b374553 --- /dev/null +++ b/src/libm/math/exp10f.rs.html @@ -0,0 +1,45 @@ +exp10f.rs.html -- source
+use super::{exp2, exp2f, modff};
+
+const LN10_F32: f32 = 3.32192809488736234787031942948939; /* log2(10), despite the name */
+const LN10_F64: f64 = 3.32192809488736234787031942948939; /* log2(10), despite the name */
+const P10: &[f32] = &[
+    1e-7, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7,
+];
+
+pub fn exp10f(x: f32) -> f32 {
+    let (mut y, n) = modff(x);
+    let u = n.to_bits();
+    /* fabsf(n) < 8 without raising invalid on nan */
+    if (u >> 23 & 0xff) < 0x7f + 3 {
+        if y == 0.0 {
+            return P10[((n as isize) + 7) as usize];
+        }
+        y = exp2f(LN10_F32 * y);
+        return y * P10[((n as isize) + 7) as usize];
+    }
+    return exp2(LN10_F64 * (x as f64)) as f32;
+}
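+
+// Editor's note: illustrative checks, not part of the upstream source; integer
+// arguments in [-7, 7] are served exactly from the P10 table above.
+#[test]
+fn exp10f_demo() {
+    assert_eq!(exp10f(3.0), 1000.0);
+    assert_eq!(exp10f(-2.0), 0.01);
+}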
+
+
\ No newline at end of file diff --git a/src/libm/math/exp2.rs.html b/src/libm/math/exp2.rs.html new file mode 100644 index 000000000..32dac3461 --- /dev/null +++ b/src/libm/math/exp2.rs.html @@ -0,0 +1,793 @@ +exp2.rs.html -- source
+// origin: FreeBSD /usr/src/lib/msun/src/s_exp2.c
+//-
+// Copyright (c) 2005 David Schultz <das@FreeBSD.ORG>
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+// SUCH DAMAGE.
+
+use super::scalbn;
+
+const TBLSIZE: usize = 256;
+
+#[cfg_attr(rustfmt, rustfmt_skip)]
+static TBL: [u64; TBLSIZE * 2] = [
+    //  exp2(z + eps)          eps
+    0x3fe6a09e667f3d5d, 0x3d39880000000000,
+    0x3fe6b052fa751744, 0x3cd8000000000000,
+    0x3fe6c012750bd9fe, 0xbd28780000000000,
+    0x3fe6cfdcddd476bf, 0x3d1ec00000000000,
+    0x3fe6dfb23c651a29, 0xbcd8000000000000,
+    0x3fe6ef9298593ae3, 0xbcbc000000000000,
+    0x3fe6ff7df9519386, 0xbd2fd80000000000,
+    0x3fe70f7466f42da3, 0xbd2c880000000000,
+    0x3fe71f75e8ec5fc3, 0x3d13c00000000000,
+    0x3fe72f8286eacf05, 0xbd38300000000000,
+    0x3fe73f9a48a58152, 0xbd00c00000000000,
+    0x3fe74fbd35d7ccfc, 0x3d2f880000000000,
+    0x3fe75feb564267f1, 0x3d03e00000000000,
+    0x3fe77024b1ab6d48, 0xbd27d00000000000,
+    0x3fe780694fde5d38, 0xbcdd000000000000,
+    0x3fe790b938ac1d00, 0x3ce3000000000000,
+    0x3fe7a11473eb0178, 0xbced000000000000,
+    0x3fe7b17b0976d060, 0x3d20400000000000,
+    0x3fe7c1ed0130c133, 0x3ca0000000000000,
+    0x3fe7d26a62ff8636, 0xbd26900000000000,
+    0x3fe7e2f336cf4e3b, 0xbd02e00000000000,
+    0x3fe7f3878491c3e8, 0xbd24580000000000,
+    0x3fe80427543e1b4e, 0x3d33000000000000,
+    0x3fe814d2add1071a, 0x3d0f000000000000,
+    0x3fe82589994ccd7e, 0xbd21c00000000000,
+    0x3fe8364c1eb942d0, 0x3d29d00000000000,
+    0x3fe8471a4623cab5, 0x3d47100000000000,
+    0x3fe857f4179f5bbc, 0x3d22600000000000,
+    0x3fe868d99b4491af, 0xbd32c40000000000,
+    0x3fe879cad931a395, 0xbd23000000000000,
+    0x3fe88ac7d98a65b8, 0xbd2a800000000000,
+    0x3fe89bd0a4785800, 0xbced000000000000,
+    0x3fe8ace5422aa223, 0x3d33280000000000,
+    0x3fe8be05bad619fa, 0x3d42b40000000000,
+    0x3fe8cf3216b54383, 0xbd2ed00000000000,
+    0x3fe8e06a5e08664c, 0xbd20500000000000,
+    0x3fe8f1ae99157807, 0x3d28280000000000,
+    0x3fe902fed0282c0e, 0xbd1cb00000000000,
+    0x3fe9145b0b91ff96, 0xbd05e00000000000,
+    0x3fe925c353aa2ff9, 0x3cf5400000000000,
+    0x3fe93737b0cdc64a, 0x3d17200000000000,
+    0x3fe948b82b5f98ae, 0xbd09000000000000,
+    0x3fe95a44cbc852cb, 0x3d25680000000000,
+    0x3fe96bdd9a766f21, 0xbd36d00000000000,
+    0x3fe97d829fde4e2a, 0xbd01000000000000,
+    0x3fe98f33e47a23a3, 0x3d2d000000000000,
+    0x3fe9a0f170ca0604, 0xbd38a40000000000,
+    0x3fe9b2bb4d53ff89, 0x3d355c0000000000,
+    0x3fe9c49182a3f15b, 0x3d26b80000000000,
+    0x3fe9d674194bb8c5, 0xbcec000000000000,
+    0x3fe9e86319e3238e, 0x3d17d00000000000,
+    0x3fe9fa5e8d07f302, 0x3d16400000000000,
+    0x3fea0c667b5de54d, 0xbcf5000000000000,
+    0x3fea1e7aed8eb8f6, 0x3d09e00000000000,
+    0x3fea309bec4a2e27, 0x3d2ad80000000000,
+    0x3fea42c980460a5d, 0xbd1af00000000000,
+    0x3fea5503b23e259b, 0x3d0b600000000000,
+    0x3fea674a8af46213, 0x3d38880000000000,
+    0x3fea799e1330b3a7, 0x3d11200000000000,
+    0x3fea8bfe53c12e8d, 0x3d06c00000000000,
+    0x3fea9e6b5579fcd2, 0xbd29b80000000000,
+    0x3feab0e521356fb8, 0x3d2b700000000000,
+    0x3feac36bbfd3f381, 0x3cd9000000000000,
+    0x3fead5ff3a3c2780, 0x3ce4000000000000,
+    0x3feae89f995ad2a3, 0xbd2c900000000000,
+    0x3feafb4ce622f367, 0x3d16500000000000,
+    0x3feb0e07298db790, 0x3d2fd40000000000,
+    0x3feb20ce6c9a89a9, 0x3d12700000000000,
+    0x3feb33a2b84f1a4b, 0x3d4d470000000000,
+    0x3feb468415b747e7, 0xbd38380000000000,
+    0x3feb59728de5593a, 0x3c98000000000000,
+    0x3feb6c6e29f1c56a, 0x3d0ad00000000000,
+    0x3feb7f76f2fb5e50, 0x3cde800000000000,
+    0x3feb928cf22749b2, 0xbd04c00000000000,
+    0x3feba5b030a10603, 0xbd0d700000000000,
+    0x3febb8e0b79a6f66, 0x3d0d900000000000,
+    0x3febcc1e904bc1ff, 0x3d02a00000000000,
+    0x3febdf69c3f3a16f, 0xbd1f780000000000,
+    0x3febf2c25bd71db8, 0xbd10a00000000000,
+    0x3fec06286141b2e9, 0xbd11400000000000,
+    0x3fec199bdd8552e0, 0x3d0be00000000000,
+    0x3fec2d1cd9fa64ee, 0xbd09400000000000,
+    0x3fec40ab5fffd02f, 0xbd0ed00000000000,
+    0x3fec544778fafd15, 0x3d39660000000000,
+    0x3fec67f12e57d0cb, 0xbd1a100000000000,
+    0x3fec7ba88988c1b6, 0xbd58458000000000,
+    0x3fec8f6d9406e733, 0xbd1a480000000000,
+    0x3feca3405751c4df, 0x3ccb000000000000,
+    0x3fecb720dcef9094, 0x3d01400000000000,
+    0x3feccb0f2e6d1689, 0x3cf0200000000000,
+    0x3fecdf0b555dc412, 0x3cf3600000000000,
+    0x3fecf3155b5bab3b, 0xbd06900000000000,
+    0x3fed072d4a0789bc, 0x3d09a00000000000,
+    0x3fed1b532b08c8fa, 0xbd15e00000000000,
+    0x3fed2f87080d8a85, 0x3d1d280000000000,
+    0x3fed43c8eacaa203, 0x3d01a00000000000,
+    0x3fed5818dcfba491, 0x3cdf000000000000,
+    0x3fed6c76e862e6a1, 0xbd03a00000000000,
+    0x3fed80e316c9834e, 0xbd0cd80000000000,
+    0x3fed955d71ff6090, 0x3cf4c00000000000,
+    0x3feda9e603db32ae, 0x3cff900000000000,
+    0x3fedbe7cd63a8325, 0x3ce9800000000000,
+    0x3fedd321f301b445, 0xbcf5200000000000,
+    0x3fede7d5641c05bf, 0xbd1d700000000000,
+    0x3fedfc97337b9aec, 0xbd16140000000000,
+    0x3fee11676b197d5e, 0x3d0b480000000000,
+    0x3fee264614f5a3e7, 0x3d40ce0000000000,
+    0x3fee3b333b16ee5c, 0x3d0c680000000000,
+    0x3fee502ee78b3fb4, 0xbd09300000000000,
+    0x3fee653924676d68, 0xbce5000000000000,
+    0x3fee7a51fbc74c44, 0xbd07f80000000000,
+    0x3fee8f7977cdb726, 0xbcf3700000000000,
+    0x3feea4afa2a490e8, 0x3ce5d00000000000,
+    0x3feeb9f4867ccae4, 0x3d161a0000000000,
+    0x3feecf482d8e680d, 0x3cf5500000000000,
+    0x3feee4aaa2188514, 0x3cc6400000000000,
+    0x3feefa1bee615a13, 0xbcee800000000000,
+    0x3fef0f9c1cb64106, 0xbcfa880000000000,
+    0x3fef252b376bb963, 0xbd2c900000000000,
+    0x3fef3ac948dd7275, 0x3caa000000000000,
+    0x3fef50765b6e4524, 0xbcf4f00000000000,
+    0x3fef6632798844fd, 0x3cca800000000000,
+    0x3fef7bfdad9cbe38, 0x3cfabc0000000000,
+    0x3fef91d802243c82, 0xbcd4600000000000,
+    0x3fefa7c1819e908e, 0xbd0b0c0000000000,
+    0x3fefbdba3692d511, 0xbcc0e00000000000,
+    0x3fefd3c22b8f7194, 0xbd10de8000000000,
+    0x3fefe9d96b2a23ee, 0x3cee430000000000,
+    0x3ff0000000000000, 0x0,
+    0x3ff00b1afa5abcbe, 0xbcb3400000000000,
+    0x3ff0163da9fb3303, 0xbd12170000000000,
+    0x3ff02168143b0282, 0x3cba400000000000,
+    0x3ff02c9a3e77806c, 0x3cef980000000000,
+    0x3ff037d42e11bbca, 0xbcc7400000000000,
+    0x3ff04315e86e7f89, 0x3cd8300000000000,
+    0x3ff04e5f72f65467, 0xbd1a3f0000000000,
+    0x3ff059b0d315855a, 0xbd02840000000000,
+    0x3ff0650a0e3c1f95, 0x3cf1600000000000,
+    0x3ff0706b29ddf71a, 0x3d15240000000000,
+    0x3ff07bd42b72a82d, 0xbce9a00000000000,
+    0x3ff0874518759bd0, 0x3ce6400000000000,
+    0x3ff092bdf66607c8, 0xbd00780000000000,
+    0x3ff09e3ecac6f383, 0xbc98000000000000,
+    0x3ff0a9c79b1f3930, 0x3cffa00000000000,
+    0x3ff0b5586cf988fc, 0xbcfac80000000000,
+    0x3ff0c0f145e46c8a, 0x3cd9c00000000000,
+    0x3ff0cc922b724816, 0x3d05200000000000,
+    0x3ff0d83b23395dd8, 0xbcfad00000000000,
+    0x3ff0e3ec32d3d1f3, 0x3d1bac0000000000,
+    0x3ff0efa55fdfa9a6, 0xbd04e80000000000,
+    0x3ff0fb66affed2f0, 0xbd0d300000000000,
+    0x3ff1073028d7234b, 0x3cf1500000000000,
+    0x3ff11301d0125b5b, 0x3cec000000000000,
+    0x3ff11edbab5e2af9, 0x3d16bc0000000000,
+    0x3ff12abdc06c31d5, 0x3ce8400000000000,
+    0x3ff136a814f2047d, 0xbd0ed00000000000,
+    0x3ff1429aaea92de9, 0x3ce8e00000000000,
+    0x3ff14e95934f3138, 0x3ceb400000000000,
+    0x3ff15a98c8a58e71, 0x3d05300000000000,
+    0x3ff166a45471c3df, 0x3d03380000000000,
+    0x3ff172b83c7d5211, 0x3d28d40000000000,
+    0x3ff17ed48695bb9f, 0xbd05d00000000000,
+    0x3ff18af9388c8d93, 0xbd1c880000000000,
+    0x3ff1972658375d66, 0x3d11f00000000000,
+    0x3ff1a35beb6fcba7, 0x3d10480000000000,
+    0x3ff1af99f81387e3, 0xbd47390000000000,
+    0x3ff1bbe084045d54, 0x3d24e40000000000,
+    0x3ff1c82f95281c43, 0xbd0a200000000000,
+    0x3ff1d4873168b9b2, 0x3ce3800000000000,
+    0x3ff1e0e75eb44031, 0x3ceac00000000000,
+    0x3ff1ed5022fcd938, 0x3d01900000000000,
+    0x3ff1f9c18438cdf7, 0xbd1b780000000000,
+    0x3ff2063b88628d8f, 0x3d2d940000000000,
+    0x3ff212be3578a81e, 0x3cd8000000000000,
+    0x3ff21f49917ddd41, 0x3d2b340000000000,
+    0x3ff22bdda2791323, 0x3d19f80000000000,
+    0x3ff2387a6e7561e7, 0xbd19c80000000000,
+    0x3ff2451ffb821427, 0x3d02300000000000,
+    0x3ff251ce4fb2a602, 0xbd13480000000000,
+    0x3ff25e85711eceb0, 0x3d12700000000000,
+    0x3ff26b4565e27d16, 0x3d11d00000000000,
+    0x3ff2780e341de00f, 0x3d31ee0000000000,
+    0x3ff284dfe1f5633e, 0xbd14c00000000000,
+    0x3ff291ba7591bb30, 0xbd13d80000000000,
+    0x3ff29e9df51fdf09, 0x3d08b00000000000,
+    0x3ff2ab8a66d10e9b, 0xbd227c0000000000,
+    0x3ff2b87fd0dada3a, 0x3d2a340000000000,
+    0x3ff2c57e39771af9, 0xbd10800000000000,
+    0x3ff2d285a6e402d9, 0xbd0ed00000000000,
+    0x3ff2df961f641579, 0xbcf4200000000000,
+    0x3ff2ecafa93e2ecf, 0xbd24980000000000,
+    0x3ff2f9d24abd8822, 0xbd16300000000000,
+    0x3ff306fe0a31b625, 0xbd32360000000000,
+    0x3ff31432edeea50b, 0xbd70df8000000000,
+    0x3ff32170fc4cd7b8, 0xbd22480000000000,
+    0x3ff32eb83ba8e9a2, 0xbd25980000000000,
+    0x3ff33c08b2641766, 0x3d1ed00000000000,
+    0x3ff3496266e3fa27, 0xbcdc000000000000,
+    0x3ff356c55f929f0f, 0xbd30d80000000000,
+    0x3ff36431a2de88b9, 0x3d22c80000000000,
+    0x3ff371a7373aaa39, 0x3d20600000000000,
+    0x3ff37f26231e74fe, 0xbd16600000000000,
+    0x3ff38cae6d05d838, 0xbd0ae00000000000,
+    0x3ff39a401b713ec3, 0xbd44720000000000,
+    0x3ff3a7db34e5a020, 0x3d08200000000000,
+    0x3ff3b57fbfec6e95, 0x3d3e800000000000,
+    0x3ff3c32dc313a8f2, 0x3cef800000000000,
+    0x3ff3d0e544ede122, 0xbd17a00000000000,
+    0x3ff3dea64c1234bb, 0x3d26300000000000,
+    0x3ff3ec70df1c4ecc, 0xbd48a60000000000,
+    0x3ff3fa4504ac7e8c, 0xbd3cdc0000000000,
+    0x3ff40822c367a0bb, 0x3d25b80000000000,
+    0x3ff4160a21f72e95, 0x3d1ec00000000000,
+    0x3ff423fb27094646, 0xbd13600000000000,
+    0x3ff431f5d950a920, 0x3d23980000000000,
+    0x3ff43ffa3f84b9eb, 0x3cfa000000000000,
+    0x3ff44e0860618919, 0xbcf6c00000000000,
+    0x3ff45c2042a7d201, 0xbd0bc00000000000,
+    0x3ff46a41ed1d0016, 0xbd12800000000000,
+    0x3ff4786d668b3326, 0x3d30e00000000000,
+    0x3ff486a2b5c13c00, 0xbd2d400000000000,
+    0x3ff494e1e192af04, 0x3d0c200000000000,
+    0x3ff4a32af0d7d372, 0xbd1e500000000000,
+    0x3ff4b17dea6db801, 0x3d07800000000000,
+    0x3ff4bfdad53629e1, 0xbd13800000000000,
+    0x3ff4ce41b817c132, 0x3d00800000000000,
+    0x3ff4dcb299fddddb, 0x3d2c700000000000,
+    0x3ff4eb2d81d8ab96, 0xbd1ce00000000000,
+    0x3ff4f9b2769d2d02, 0x3d19200000000000,
+    0x3ff508417f4531c1, 0xbd08c00000000000,
+    0x3ff516daa2cf662a, 0xbcfa000000000000,
+    0x3ff5257de83f51ea, 0x3d4a080000000000,
+    0x3ff5342b569d4eda, 0xbd26d80000000000,
+    0x3ff542e2f4f6ac1a, 0xbd32440000000000,
+    0x3ff551a4ca5d94db, 0x3d483c0000000000,
+    0x3ff56070dde9116b, 0x3d24b00000000000,
+    0x3ff56f4736b529de, 0x3d415a0000000000,
+    0x3ff57e27dbe2c40e, 0xbd29e00000000000,
+    0x3ff58d12d497c76f, 0xbd23080000000000,
+    0x3ff59c0827ff0b4c, 0x3d4dec0000000000,
+    0x3ff5ab07dd485427, 0xbcc4000000000000,
+    0x3ff5ba11fba87af4, 0x3d30080000000000,
+    0x3ff5c9268a59460b, 0xbd26c80000000000,
+    0x3ff5d84590998e3f, 0x3d469a0000000000,
+    0x3ff5e76f15ad20e1, 0xbd1b400000000000,
+    0x3ff5f6a320dcebca, 0x3d17700000000000,
+    0x3ff605e1b976dcb8, 0x3d26f80000000000,
+    0x3ff6152ae6cdf715, 0x3d01000000000000,
+    0x3ff6247eb03a5531, 0xbd15d00000000000,
+    0x3ff633dd1d1929b5, 0xbd12d00000000000,
+    0x3ff6434634ccc313, 0xbcea800000000000,
+    0x3ff652b9febc8efa, 0xbd28600000000000,
+    0x3ff6623882553397, 0x3d71fe0000000000,
+    0x3ff671c1c708328e, 0xbd37200000000000,
+    0x3ff68155d44ca97e, 0x3ce6800000000000,
+    0x3ff690f4b19e9471, 0xbd29780000000000,
+];
+
+// exp2(x): compute the base 2 exponential of x
+//
+// Accuracy: Peak error < 0.503 ulp for normalized results.
+//
+// Method: (accurate tables)
+//
+//   Reduce x:
+//     x = k + y, for integer k and |y| <= 1/2.
+//     Thus we have exp2(x) = 2**k * exp2(y).
+//
+//   Reduce y:
+//     y = i/TBLSIZE + z - eps[i] for integer i near y * TBLSIZE.
+//     Thus we have exp2(y) = exp2(i/TBLSIZE) * exp2(z - eps[i]),
+//     with |z - eps[i]| <= 2**-9 + 2**-39 for the table used.
+//
+//   We compute exp2(i/TBLSIZE) via table lookup and exp2(z - eps[i]) via
+//   a degree-5 minimax polynomial with maximum error under 1.3 * 2**-61.
+//   The values in exp2t[] and eps[] are chosen such that
+//   exp2t[i] = exp2(i/TBLSIZE + eps[i]), and eps[i] is a small offset such
+//   that exp2t[i] is accurate to 2**-64.
+//
+//   Note that the range of i is +-TBLSIZE/2, so we actually index the tables
+//   by i0 = i + TBLSIZE/2.  For cache efficiency, exp2t[] and eps[] are
+//   virtual tables, interleaved in the real table tbl[].
+//
+//   This method is due to Gal, with many details due to Gal and Bachelis:
+//
+//      Gal, S. and Bachelis, B.  An Accurate Elementary Mathematical Library
+//      for the IEEE Floating Point Standard.  TOMS 17(1), 26-46 (1991).
+
+/// Exponential, base 2 (f64)
+///
+/// Calculate `2^x`, that is, 2 raised to the power `x`.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn exp2(mut x: f64) -> f64 {
+    let redux = f64::from_bits(0x4338000000000000) / TBLSIZE as f64;
+    let p1 = f64::from_bits(0x3fe62e42fefa39ef);
+    let p2 = f64::from_bits(0x3fcebfbdff82c575);
+    let p3 = f64::from_bits(0x3fac6b08d704a0a6);
+    let p4 = f64::from_bits(0x3f83b2ab88f70400);
+    let p5 = f64::from_bits(0x3f55d88003875c74);
+
+    let x1p1023 = f64::from_bits(0x7fe0000000000000);
+    let x1p52 = f64::from_bits(0x4330000000000000);
+    let _0x1p_149 = f64::from_bits(0xb6a0000000000000);
+
+    /* Filter out exceptional cases. */
+    let ui = f64::to_bits(x);
+    let ix = ui >> 32 & 0x7fffffff;
+    if ix >= 0x408ff000 {
+        /* |x| >= 1022 or nan */
+        if ix >= 0x40900000 && ui >> 63 == 0 {
+            /* x >= 1024 or nan */
+            /* overflow */
+            x *= x1p1023;
+            return x;
+        }
+        if ix >= 0x7ff00000 {
+            /* -inf or -nan */
+            return -1.0 / x;
+        }
+        if ui >> 63 != 0 {
+            /* x <= -1022 */
+            /* underflow */
+            if x <= -1075.0 || x - x1p52 + x1p52 != x {
+                force_eval!((_0x1p_149 / x) as f32);
+            }
+            if x <= -1075.0 {
+                return 0.0;
+            }
+        }
+    } else if ix < 0x3c900000 {
+        /* |x| < 0x1p-54 */
+        return 1.0 + x;
+    }
+
+    /* Reduce x, computing z, i0, and k. */
+    let ui = f64::to_bits(x + redux);
+    let mut i0 = ui as u32;
+    i0 = i0.wrapping_add(TBLSIZE as u32 / 2);
+    let ku = i0 / TBLSIZE as u32 * TBLSIZE as u32;
+    let ki = ku as i32 / TBLSIZE as i32;
+    i0 %= TBLSIZE as u32;
+    let uf = f64::from_bits(ui) - redux;
+    let mut z = x - uf;
+
+    /* Compute r = exp2(y) = exp2t[i0] * p(z - eps[i]). */
+    let t = f64::from_bits(TBL[2 * i0 as usize]); /* exp2t[i0] */
+    z -= f64::from_bits(TBL[2 * i0 as usize + 1]); /* eps[i0]   */
+    let r = t + t * z * (p1 + z * (p2 + z * (p3 + z * (p4 + z * p5))));
+
+    scalbn(r, ki)
+}
+
+#[test]
+fn i0_wrap_test() {
+    let x = -3.0 / 256.0;
+    assert_eq!(exp2(x), f64::from_bits(0x3fefbdba3692d514));
+}
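+
+// Editor's note: a companion illustrative check, not part of the upstream
+// source. For integer x the reduction leaves z = 0 and the table entry is
+// exactly 1.0, so scalbn produces the exact power of two.
+#[test]
+fn exp2_exact_power_demo() {
+    assert_eq!(exp2(10.0), 1024.0);
+    assert_eq!(exp2(-1.0), 0.5);
+}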
+
+
\ No newline at end of file diff --git a/src/libm/math/exp2f.rs.html b/src/libm/math/exp2f.rs.html new file mode 100644 index 000000000..aeecb67bd --- /dev/null +++ b/src/libm/math/exp2f.rs.html @@ -0,0 +1,275 @@ +exp2f.rs.html -- source
+// origin: FreeBSD /usr/src/lib/msun/src/s_exp2f.c
+//-
+// Copyright (c) 2005 David Schultz <das@FreeBSD.ORG>
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+// 1. Redistributions of source code must retain the above copyright
+//    notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+//    notice, this list of conditions and the following disclaimer in the
+//    documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+// SUCH DAMAGE.
+
+const TBLSIZE: usize = 16;
+
+static EXP2FT: [u64; TBLSIZE] = [
+    0x3fe6a09e667f3bcd,
+    0x3fe7a11473eb0187,
+    0x3fe8ace5422aa0db,
+    0x3fe9c49182a3f090,
+    0x3feae89f995ad3ad,
+    0x3fec199bdd85529c,
+    0x3fed5818dcfba487,
+    0x3feea4afa2a490da,
+    0x3ff0000000000000,
+    0x3ff0b5586cf9890f,
+    0x3ff172b83c7d517b,
+    0x3ff2387a6e756238,
+    0x3ff306fe0a31b715,
+    0x3ff3dea64c123422,
+    0x3ff4bfdad5362a27,
+    0x3ff5ab07dd485429,
+];
+
+// exp2f(x): compute the base 2 exponential of x
+//
+// Accuracy: Peak error < 0.501 ulp; location of peak: -0.030110927.
+//
+// Method: (equally-spaced tables)
+//
+//   Reduce x:
+//     x = k + y, for integer k and |y| <= 1/2.
+//     Thus we have exp2f(x) = 2**k * exp2(y).
+//
+//   Reduce y:
+//     y = i/TBLSIZE + z for integer i near y * TBLSIZE.
+//     Thus we have exp2(y) = exp2(i/TBLSIZE) * exp2(z),
+//     with |z| <= 2**-(log2(TBLSIZE)+1).
+//
+//   We compute exp2(i/TBLSIZE) via table lookup and exp2(z) via a
+//   degree-4 minimax polynomial with maximum error under 1.4 * 2**-33.
+//   Using double precision for everything except the reduction makes
+//   roundoff error insignificant and simplifies the scaling step.
+//
+//   This method is due to Tang, but I do not use his suggested parameters:
+//
+//      Tang, P.  Table-driven Implementation of the Exponential Function
+//      in IEEE Floating-Point Arithmetic.  TOMS 15(2), 144-157 (1989).
+
+/// Exponential, base 2 (f32)
+///
+/// Calculate `2^x`, that is, 2 raised to the power `x`.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn exp2f(mut x: f32) -> f32 {
+    let redux = f32::from_bits(0x4b400000) / TBLSIZE as f32;
+    let p1 = f32::from_bits(0x3f317218);
+    let p2 = f32::from_bits(0x3e75fdf0);
+    let p3 = f32::from_bits(0x3d6359a4);
+    let p4 = f32::from_bits(0x3c1d964e);
+
+    // double_t t, r, z;
+    // uint32_t ix, i0, k;
+
+    let x1p127 = f32::from_bits(0x7f000000);
+
+    /* Filter out exceptional cases. */
+    let ui = f32::to_bits(x);
+    let ix = ui & 0x7fffffff;
+    if ix > 0x42fc0000 {
+        /* |x| > 126 */
+        if ix > 0x7f800000 {
+            /* NaN */
+            return x;
+        }
+        if ui >= 0x43000000 && ui < 0x80000000 {
+            /* x >= 128 */
+            x *= x1p127;
+            return x;
+        }
+        if ui >= 0x80000000 {
+            /* x < -126 */
+            if ui >= 0xc3160000 || (ui & 0x0000ffff != 0) {
+                force_eval!(f32::from_bits(0x80000001) / x);
+            }
+            if ui >= 0xc3160000 {
+                /* x <= -150 */
+                return 0.0;
+            }
+        }
+    } else if ix <= 0x33000000 {
+        /* |x| <= 0x1p-25 */
+        return 1.0 + x;
+    }
+
+    /* Reduce x, computing z, i0, and k. */
+    let ui = f32::to_bits(x + redux);
+    let mut i0 = ui;
+    i0 += TBLSIZE as u32 / 2;
+    let k = i0 / TBLSIZE as u32;
+    let ukf = f64::from_bits(((0x3ff + k) as u64) << 52);
+    i0 &= TBLSIZE as u32 - 1;
+    let mut uf = f32::from_bits(ui);
+    uf -= redux;
+    let z: f64 = (x - uf) as f64;
+    /* Compute r = exp2(y) = exp2ft[i0] * p(z). */
+    let r: f64 = f64::from_bits(EXP2FT[i0 as usize]);
+    let t: f64 = r as f64 * z;
+    let r: f64 = r + t * (p1 as f64 + z * p2 as f64) + t * (z * z) * (p3 as f64 + z * p4 as f64);
+
+    /* Scale by 2**k */
+    (r * ukf) as f32
+}
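+
+// Minimal sanity checks (not in the upstream source), assuming round-to-nearest:
+// integer inputs reduce to z == 0 against the exact table entry EXP2FT[8] = 1.0,
+// and |x| <= 0x1p-25 returns 1.0 + x directly.
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn sanity_check() {
+        assert_eq!(super::exp2f(2.0), 4.0);
+        assert_eq!(super::exp2f(0.0), 1.0);
+    }
+}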
+
+
\ No newline at end of file diff --git a/src/libm/math/expf.rs.html b/src/libm/math/expf.rs.html new file mode 100644 index 000000000..7b86caed8 --- /dev/null +++ b/src/libm/math/expf.rs.html @@ -0,0 +1,207 @@ +expf.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/e_expf.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::scalbnf;
+
+const HALF: [f32; 2] = [0.5, -0.5];
+const LN2_HI: f32 = 6.9314575195e-01; /* 0x3f317200 */
+const LN2_LO: f32 = 1.4286067653e-06; /* 0x35bfbe8e */
+const INV_LN2: f32 = 1.4426950216e+00; /* 0x3fb8aa3b */
+/*
+ * Domain [-0.34568, 0.34568], range ~[-4.278e-9, 4.447e-9]:
+ * |x*(exp(x)+1)/(exp(x)-1) - p(x)| < 2**-27.74
+ */
+const P1: f32 = 1.6666625440e-1; /*  0xaaaa8f.0p-26 */
+const P2: f32 = -2.7667332906e-3; /* -0xb55215.0p-32 */
+
+/// Exponential, base *e* (f32)
+///
+/// Calculate the exponential of `x`, that is, *e* raised to the power `x`
+/// (where *e* is the base of the natural system of logarithms, approximately 2.71828).
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn expf(mut x: f32) -> f32 {
+    let x1p127 = f32::from_bits(0x7f000000); // 0x1p127f === 2 ^ 127
+    let x1p_126 = f32::from_bits(0x800000); // 0x1p-126f === 2 ^ -126 (the original C source used 0x1p-149f here)
+    let mut hx = x.to_bits();
+    let sign = (hx >> 31) as i32; /* sign bit of x */
+    let signb: bool = sign != 0;
+    hx &= 0x7fffffff; /* high word of |x| */
+
+    /* special cases */
+    if hx >= 0x42aeac50 {
+        /* if |x| >= 87.33655f or NaN */
+        if hx > 0x7f800000 {
+            /* NaN */
+            return x;
+        }
+        if (hx >= 0x42b17218) && (!signb) {
+            /* x >= 88.722839f */
+            /* overflow */
+            x *= x1p127;
+            return x;
+        }
+        if signb {
+            /* underflow */
+            force_eval!(-x1p_126 / x);
+            if hx >= 0x42cff1b5 {
+                /* x <= -103.972084f */
+                return 0.;
+            }
+        }
+    }
+
+    /* argument reduction */
+    let k: i32;
+    let hi: f32;
+    let lo: f32;
+    if hx > 0x3eb17218 {
+        /* if |x| > 0.5 ln2 */
+        if hx > 0x3f851592 {
+            /* if |x| > 1.5 ln2 */
+            k = (INV_LN2 * x + HALF[sign as usize]) as i32;
+        } else {
+            k = 1 - sign - sign;
+        }
+        let kf = k as f32;
+        hi = x - kf * LN2_HI; /* k*ln2hi is exact here */
+        lo = kf * LN2_LO;
+        x = hi - lo;
+    } else if hx > 0x39000000 {
+        /* |x| > 2**-14 */
+        k = 0;
+        hi = x;
+        lo = 0.;
+    } else {
+        /* raise inexact */
+        force_eval!(x1p127 + x);
+        return 1. + x;
+    }
+
+    /* x is now in primary range */
+    let xx = x * x;
+    let c = x - xx * (P1 + xx * P2);
+    let y = 1. + (x * c / (2. - c) - lo + hi);
+    if k == 0 {
+        y
+    } else {
+        scalbnf(y, k)
+    }
+}
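+
+// Minimal sanity checks (not in the upstream source): the polynomial path has a
+// peak error well below one f32 ulp, so a loose tolerance against the f32
+// rounding of e is safe.
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn sanity_check() {
+        assert_eq!(super::expf(0.0), 1.0);
+        assert!((super::expf(1.0) - core::f32::consts::E).abs() < 1e-6);
+    }
+}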
+
+
\ No newline at end of file diff --git a/src/libm/math/expm1.rs.html b/src/libm/math/expm1.rs.html new file mode 100644 index 000000000..76785464e --- /dev/null +++ b/src/libm/math/expm1.rs.html @@ -0,0 +1,293 @@ +expm1.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/s_expm1.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use core::f64;
+
+const O_THRESHOLD: f64 = 7.09782712893383973096e+02; /* 0x40862E42, 0xFEFA39EF */
+const LN2_HI: f64 = 6.93147180369123816490e-01; /* 0x3fe62e42, 0xfee00000 */
+const LN2_LO: f64 = 1.90821492927058770002e-10; /* 0x3dea39ef, 0x35793c76 */
+const INVLN2: f64 = 1.44269504088896338700e+00; /* 0x3ff71547, 0x652b82fe */
+/* Scaled Q's: Qn_here = 2**n * Qn_above, for R(2*z) where z = hxs = x*x/2: */
+const Q1: f64 = -3.33333333333331316428e-02; /* BFA11111 111110F4 */
+const Q2: f64 = 1.58730158725481460165e-03; /* 3F5A01A0 19FE5585 */
+const Q3: f64 = -7.93650757867487942473e-05; /* BF14CE19 9EAADBB7 */
+const Q4: f64 = 4.00821782732936239552e-06; /* 3ED0CFCA 86E65239 */
+const Q5: f64 = -2.01099218183624371326e-07; /* BE8AFDB7 6E09C32D */
+
+/// Exponential, base *e*, of x-1 (f64)
+///
+/// Calculates the exponential of `x` and subtracts 1, that is, *e* raised
+/// to the power `x` minus 1 (where *e* is the base of the natural
+/// system of logarithms, approximately 2.71828).
+/// The result is accurate even for small values of `x`,
+/// where using `exp(x)-1` would lose many significant digits.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn expm1(mut x: f64) -> f64 {
+    let hi: f64;
+    let lo: f64;
+    let k: i32;
+    let c: f64;
+    let mut t: f64;
+    let mut y: f64;
+
+    let mut ui = x.to_bits();
+    let hx = ((ui >> 32) & 0x7fffffff) as u32;
+    let sign = (ui >> 63) as i32;
+
+    /* filter out huge and non-finite argument */
+    if hx >= 0x4043687A {
+        /* if |x|>=56*ln2 */
+        if x.is_nan() {
+            return x;
+        }
+        if sign != 0 {
+            return -1.0;
+        }
+        if x > O_THRESHOLD {
+            x *= f64::from_bits(0x7fe0000000000000);
+            return x;
+        }
+    }
+
+    /* argument reduction */
+    if hx > 0x3fd62e42 {
+        /* if  |x| > 0.5 ln2 */
+        if hx < 0x3FF0A2B2 {
+            /* and |x| < 1.5 ln2 */
+            if sign == 0 {
+                hi = x - LN2_HI;
+                lo = LN2_LO;
+                k = 1;
+            } else {
+                hi = x + LN2_HI;
+                lo = -LN2_LO;
+                k = -1;
+            }
+        } else {
+            k = (INVLN2 * x + if sign != 0 { -0.5 } else { 0.5 }) as i32;
+            t = k as f64;
+            hi = x - t * LN2_HI; /* t*ln2_hi is exact here */
+            lo = t * LN2_LO;
+        }
+        x = hi - lo;
+        c = (hi - x) - lo;
+    } else if hx < 0x3c900000 {
+        /* |x| < 2**-54, return x */
+        if hx < 0x00100000 {
+            force_eval!(x);
+        }
+        return x;
+    } else {
+        c = 0.0;
+        k = 0;
+    }
+
+    /* x is now in primary range */
+    let hfx = 0.5 * x;
+    let hxs = x * hfx;
+    let r1 = 1.0 + hxs * (Q1 + hxs * (Q2 + hxs * (Q3 + hxs * (Q4 + hxs * Q5))));
+    t = 3.0 - r1 * hfx;
+    let mut e = hxs * ((r1 - t) / (6.0 - x * t));
+    if k == 0 {
+        /* c is 0 */
+        return x - (x * e - hxs);
+    }
+    e = x * (e - c) - c;
+    e -= hxs;
+    /* exp(x) ~ 2^k (x_reduced - e + 1) */
+    if k == -1 {
+        return 0.5 * (x - e) - 0.5;
+    }
+    if k == 1 {
+        if x < -0.25 {
+            return -2.0 * (e - (x + 0.5));
+        }
+        return 1.0 + 2.0 * (x - e);
+    }
+    ui = ((0x3ff + k) as u64) << 52; /* 2^k */
+    let twopk = f64::from_bits(ui);
+    if k < 0 || k > 56 {
+        /* it suffices to return exp(x)-1 */
+        y = x - e + 1.0;
+        if k == 1024 {
+            y = y * 2.0 * f64::from_bits(0x7fe0000000000000);
+        } else {
+            y = y * twopk;
+        }
+        return y - 1.0;
+    }
+    ui = ((0x3ff - k) as u64) << 52; /* 2^-k */
+    let uf = f64::from_bits(ui);
+    if k < 20 {
+        y = (x - e + (1.0 - uf)) * twopk;
+    } else {
+        y = (x - (e + uf) + 1.0) * twopk;
+    }
+    y
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn sanity_check() {
+        assert_eq!(super::expm1(1.1), 2.0041660239464334);
+    }
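+
+    // Illustrative accuracy check (not in the upstream source): for tiny x,
+    // expm1(x) ~= x + x*x/2 to within rounding, whereas exp(x) - 1.0 computed
+    // directly in f64 would lose roughly half the significant digits to
+    // cancellation.
+    #[test]
+    fn small_x_accuracy() {
+        let x = 1e-10_f64;
+        assert!((super::expm1(x) - x).abs() < 1e-19);
+    }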
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/expm1f.rs.html b/src/libm/math/expm1f.rs.html new file mode 100644 index 000000000..74703b4e7 --- /dev/null +++ b/src/libm/math/expm1f.rs.html @@ -0,0 +1,273 @@ +expm1f.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/s_expm1f.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+const O_THRESHOLD: f32 = 8.8721679688e+01; /* 0x42b17180 */
+const LN2_HI: f32 = 6.9313812256e-01; /* 0x3f317180 */
+const LN2_LO: f32 = 9.0580006145e-06; /* 0x3717f7d1 */
+const INV_LN2: f32 = 1.4426950216e+00; /* 0x3fb8aa3b */
+/*
+ * Domain [-0.34568, 0.34568], range ~[-6.694e-10, 6.696e-10]:
+ * |6 / x * (1 + 2 * (1 / (exp(x) - 1) - 1 / x)) - q(x)| < 2**-30.04
+ * Scaled coefficients: Qn_here = 2**n * Qn_for_q (see s_expm1.c):
+ */
+const Q1: f32 = -3.3333212137e-2; /* -0x888868.0p-28 */
+const Q2: f32 = 1.5807170421e-3; /*  0xcf3010.0p-33 */
+
+/// Exponential, base *e*, of x-1 (f32)
+///
+/// Calculates the exponential of `x` and subtracts 1, that is, *e* raised
+/// to the power `x` minus 1 (where *e* is the base of the natural
+/// system of logarithms, approximately 2.71828).
+/// The result is accurate even for small values of `x`,
+/// where using `exp(x)-1` would lose many significant digits.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn expm1f(mut x: f32) -> f32 {
+    let x1p127 = f32::from_bits(0x7f000000); // 0x1p127f === 2 ^ 127
+
+    let mut hx = x.to_bits();
+    let sign = (hx >> 31) != 0;
+    hx &= 0x7fffffff;
+
+    /* filter out huge and non-finite argument */
+    if hx >= 0x4195b844 {
+        /* if |x|>=27*ln2 */
+        if hx > 0x7f800000 {
+            /* NaN */
+            return x;
+        }
+        if sign {
+            return -1.;
+        }
+        if x > O_THRESHOLD {
+            x *= x1p127;
+            return x;
+        }
+    }
+
+    let k: i32;
+    let hi: f32;
+    let lo: f32;
+    let mut c = 0f32;
+    /* argument reduction */
+    if hx > 0x3eb17218 {
+        /* if  |x| > 0.5 ln2 */
+        if hx < 0x3F851592 {
+            /* and |x| < 1.5 ln2 */
+            if !sign {
+                hi = x - LN2_HI;
+                lo = LN2_LO;
+                k = 1;
+            } else {
+                hi = x + LN2_HI;
+                lo = -LN2_LO;
+                k = -1;
+            }
+        } else {
+            k = (INV_LN2 * x + (if sign { -0.5 } else { 0.5 })) as i32;
+            let t = k as f32;
+            hi = x - t * LN2_HI; /* t*ln2_hi is exact here */
+            lo = t * LN2_LO;
+        }
+        x = hi - lo;
+        c = (hi - x) - lo;
+    } else if hx < 0x33000000 {
+        /* when |x|<2**-25, return x */
+        if hx < 0x00800000 {
+            force_eval!(x * x);
+        }
+        return x;
+    } else {
+        k = 0;
+    }
+
+    /* x is now in primary range */
+    let hfx = 0.5 * x;
+    let hxs = x * hfx;
+    let r1 = 1. + hxs * (Q1 + hxs * Q2);
+    let t = 3. - r1 * hfx;
+    let mut e = hxs * ((r1 - t) / (6. - x * t));
+    if k == 0 {
+        /* c is 0 */
+        return x - (x * e - hxs);
+    }
+    e = x * (e - c) - c;
+    e -= hxs;
+    /* exp(x) ~ 2^k (x_reduced - e + 1) */
+    if k == -1 {
+        return 0.5 * (x - e) - 0.5;
+    }
+    if k == 1 {
+        if x < -0.25 {
+            return -2. * (e - (x + 0.5));
+        }
+        return 1. + 2. * (x - e);
+    }
+    let twopk = f32::from_bits(((0x7f + k) << 23) as u32); /* 2^k */
+    if (k < 0) || (k > 56) {
+        /* it suffices to return exp(x)-1 */
+        let mut y = x - e + 1.;
+        if k == 128 {
+            y = y * 2. * x1p127;
+        } else {
+            y = y * twopk;
+        }
+        return y - 1.;
+    }
+    let uf = f32::from_bits(((0x7f - k) << 23) as u32); /* 2^-k */
+    if k < 23 {
+        (x - e + (1. - uf)) * twopk
+    } else {
+        (x - (e + uf) + 1.) * twopk
+    }
+}
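+
+// Minimal sanity checks (not in the upstream source), covering the tiny-x
+// shortcut and the large negative saturation to -1.
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn sanity_check() {
+        assert_eq!(super::expm1f(0.0), 0.0);
+        assert_eq!(super::expm1f(core::f32::NEG_INFINITY), -1.0);
+    }
+}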
+
+
\ No newline at end of file diff --git a/src/libm/math/expo2.rs.html b/src/libm/math/expo2.rs.html new file mode 100644 index 000000000..5fde6a244 --- /dev/null +++ b/src/libm/math/expo2.rs.html @@ -0,0 +1,33 @@ +expo2.rs.html -- source
+use super::{combine_words, exp};
+
+/* exp(x)/2 for x >= log(DBL_MAX), slightly better than 0.5*exp(x/2)*exp(x/2) */
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub(crate) fn expo2(x: f64) -> f64 {
+    /* k is such that k*ln2 has minimal relative error and x - kln2 > log(DBL_MIN) */
+    const K: i32 = 2043;
+    let kln2 = f64::from_bits(0x40962066151add8b);
+
+    /* note that k is odd and scale*scale overflows */
+    let scale = combine_words(((0x3ff + K / 2) as u32) << 20, 0);
+    /* exp(x - k ln2) * 2**(k-1) */
+    exp(x - kln2) * scale * scale
+}
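+
+// Usage note (not in the upstream source): cosh(x) and sinh(x) for large x are
+// exp(|x|)/2 up to rounding, which cannot be computed as 0.5 * exp(x) because
+// the exp call itself would overflow. Here exp(x - kln2) is a small normal
+// number, and multiplying by `scale` twice reaches the 2^(k-1) factor without
+// ever forming the overflowing product scale * scale on its own.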
+
+
\ No newline at end of file diff --git a/src/libm/math/fabs.rs.html b/src/libm/math/fabs.rs.html new file mode 100644 index 000000000..b53ee9ad9 --- /dev/null +++ b/src/libm/math/fabs.rs.html @@ -0,0 +1,39 @@ +fabs.rs.html -- source
+use core::u64;
+
+/// Absolute value (magnitude) (f64)
+///
+/// Calculates the absolute value (magnitude) of the argument `x`,
+/// by direct manipulation of the bit representation of `x`.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn fabs(x: f64) -> f64 {
+    // On wasm32 we know that LLVM's intrinsic will compile to an optimized
+    // `f64.abs` native instruction, so we can leverage this for both code size
+    // and speed.
+    llvm_intrinsically_optimized! {
+        #[cfg(target_arch = "wasm32")] {
+            return unsafe { ::core::intrinsics::fabsf64(x) }
+        }
+    }
+    f64::from_bits(x.to_bits() & (u64::MAX / 2))
+}
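+
+// Minimal checks (not in the upstream source): clearing the sign bit also
+// normalizes -0.0 to +0.0.
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn sanity_check() {
+        assert_eq!(super::fabs(-3.5), 3.5);
+        assert_eq!(super::fabs(-0.0).to_bits(), 0);
+    }
+}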
+
+
\ No newline at end of file diff --git a/src/libm/math/fabsf.rs.html b/src/libm/math/fabsf.rs.html new file mode 100644 index 000000000..b64b63a33 --- /dev/null +++ b/src/libm/math/fabsf.rs.html @@ -0,0 +1,35 @@ +fabsf.rs.html -- source
+/// Absolute value (magnitude) (f32)
+///
+/// Calculates the absolute value (magnitude) of the argument `x`,
+/// by direct manipulation of the bit representation of `x`.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn fabsf(x: f32) -> f32 {
+    // On wasm32 we know that LLVM's intrinsic will compile to an optimized
+    // `f32.abs` native instruction, so we can leverage this for both code size
+    // and speed.
+    llvm_intrinsically_optimized! {
+        #[cfg(target_arch = "wasm32")] {
+            return unsafe { ::core::intrinsics::fabsf32(x) }
+        }
+    }
+    f32::from_bits(x.to_bits() & 0x7fffffff)
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/fdim.rs.html b/src/libm/math/fdim.rs.html new file mode 100644 index 000000000..f8495786f --- /dev/null +++ b/src/libm/math/fdim.rs.html @@ -0,0 +1,49 @@ +fdim.rs.html -- source
+use core::f64;
+
+/// Positive difference (f64)
+///
+/// Determines the positive difference between arguments, returning:
+/// * x - y	if x > y, or
+/// * +0	if x <= y, or
+/// * NAN	if either argument is NAN.
+///
+/// A range error may occur.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn fdim(x: f64, y: f64) -> f64 {
+    if x.is_nan() {
+        x
+    } else if y.is_nan() {
+        y
+    } else if x > y {
+        x - y
+    } else {
+        0.0
+    }
+}
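+
+// Minimal checks (not in the upstream source), covering both sides of the
+// positive-difference definition above.
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn sanity_check() {
+        assert_eq!(super::fdim(5.0, 3.0), 2.0);
+        assert_eq!(super::fdim(3.0, 5.0), 0.0);
+    }
+}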
+
+
\ No newline at end of file diff --git a/src/libm/math/fdimf.rs.html b/src/libm/math/fdimf.rs.html new file mode 100644 index 000000000..0a09c3cab --- /dev/null +++ b/src/libm/math/fdimf.rs.html @@ -0,0 +1,49 @@ +fdimf.rs.html -- source
+use core::f32;
+
+/// Positive difference (f32)
+///
+/// Determines the positive difference between arguments, returning:
+/// * x - y	if x > y, or
+/// * +0	if x <= y, or
+/// * NAN	if either argument is NAN.
+///
+/// A range error may occur.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn fdimf(x: f32, y: f32) -> f32 {
+    if x.is_nan() {
+        x
+    } else if y.is_nan() {
+        y
+    } else if x > y {
+        x - y
+    } else {
+        0.0
+    }
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/fenv.rs.html b/src/libm/math/fenv.rs.html new file mode 100644 index 000000000..1ff786e03 --- /dev/null +++ b/src/libm/math/fenv.rs.html @@ -0,0 +1,69 @@ +fenv.rs.html -- source
+// src: musl/src/fenv/fenv.c
+/* Dummy functions for archs lacking fenv implementation */
+
+pub const FE_UNDERFLOW: i32 = 0;
+pub const FE_INEXACT: i32 = 0;
+
+pub const FE_TONEAREST: i32 = 0;
+pub const FE_TOWARDZERO: i32 = 0;
+
+#[inline]
+pub fn feclearexcept(_mask: i32) -> i32 {
+    0
+}
+
+#[inline]
+pub fn feraiseexcept(_mask: i32) -> i32 {
+    0
+}
+
+#[inline]
+pub fn fetestexcept(_mask: i32) -> i32 {
+    0
+}
+
+#[inline]
+pub fn fegetround() -> i32 {
+    FE_TONEAREST
+}
+
+#[inline]
+pub fn fesetround(_r: i32) -> i32 {
+    0
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/floor.rs.html b/src/libm/math/floor.rs.html new file mode 100644 index 000000000..e8fae4e23 --- /dev/null +++ b/src/libm/math/floor.rs.html @@ -0,0 +1,85 @@ +floor.rs.html -- source
+use core::f64;
+
+const TOINT: f64 = 1. / f64::EPSILON;
+
+/// Floor (f64)
+///
+/// Finds the nearest integer less than or equal to `x`.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn floor(x: f64) -> f64 {
+    // On wasm32 we know that LLVM's intrinsic will compile to an optimized
+    // `f64.floor` native instruction, so we can leverage this for both code size
+    // and speed.
+    llvm_intrinsically_optimized! {
+        #[cfg(target_arch = "wasm32")] {
+            return unsafe { ::core::intrinsics::floorf64(x) }
+        }
+    }
+    let ui = x.to_bits();
+    let e = ((ui >> 52) & 0x7ff) as i32;
+
+    if (e >= 0x3ff + 52) || (x == 0.) {
+        return x;
+    }
+    /* y = int(x) - x, where int(x) is an integer neighbor of x */
+    let y = if (ui >> 63) != 0 {
+        x - TOINT + TOINT - x
+    } else {
+        x + TOINT - TOINT - x
+    };
+    /* special case because of non-nearest rounding modes */
+    if e < 0x3ff {
+        force_eval!(y);
+        return if (ui >> 63) != 0 { -1. } else { 0. };
+    }
+    if y > 0. {
+        x + y - 1.
+    } else {
+        x + y
+    }
+}
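+
+// Minimal checks (not in the upstream source): the e < 0x3ff special case and
+// the TOINT round trip both round toward negative infinity.
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn sanity_check() {
+        assert_eq!(super::floor(1.1), 1.0);
+        assert_eq!(super::floor(-0.5), -1.0);
+    }
+}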
+
+
\ No newline at end of file diff --git a/src/libm/math/floorf.rs.html b/src/libm/math/floorf.rs.html new file mode 100644 index 000000000..8ccd692cb --- /dev/null +++ b/src/libm/math/floorf.rs.html @@ -0,0 +1,103 @@ +floorf.rs.html -- source
+use core::f32;
+
+/// Floor (f32)
+///
+/// Finds the nearest integer less than or equal to `x`.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn floorf(x: f32) -> f32 {
+    // On wasm32 we know that LLVM's intrinsic will compile to an optimized
+    // `f32.floor` native instruction, so we can leverage this for both code size
+    // and speed.
+    llvm_intrinsically_optimized! {
+        #[cfg(target_arch = "wasm32")] {
+            return unsafe { ::core::intrinsics::floorf32(x) }
+        }
+    }
+    let mut ui = x.to_bits();
+    let e = (((ui >> 23) as i32) & 0xff) - 0x7f;
+
+    if e >= 23 {
+        return x;
+    }
+    if e >= 0 {
+        let m: u32 = 0x007fffff >> e;
+        if (ui & m) == 0 {
+            return x;
+        }
+        force_eval!(x + f32::from_bits(0x7b800000));
+        if ui >> 31 != 0 {
+            ui += m;
+        }
+        ui &= !m;
+    } else {
+        force_eval!(x + f32::from_bits(0x7b800000));
+        if ui >> 31 == 0 {
+            ui = 0;
+        } else if ui << 1 != 0 {
+            return -1.0;
+        }
+    }
+    f32::from_bits(ui)
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn no_overflow() {
+        assert_eq!(super::floorf(0.5), 0.0);
+    }
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/fma.rs.html b/src/libm/math/fma.rs.html new file mode 100644 index 000000000..e121900e4 --- /dev/null +++ b/src/libm/math/fma.rs.html @@ -0,0 +1,417 @@ +fma.rs.html -- source
+use core::{f32, f64};
+
+use super::scalbn;
+
+const ZEROINFNAN: i32 = 0x7ff - 0x3ff - 52 - 1;
+
+struct Num {
+    m: u64,
+    e: i32,
+    sign: i32,
+}
+
+#[inline]
+fn normalize(x: f64) -> Num {
+    let x1p63: f64 = f64::from_bits(0x43e0000000000000); // 0x1p63 === 2 ^ 63
+
+    let mut ix: u64 = x.to_bits();
+    let mut e: i32 = (ix >> 52) as i32;
+    let sign: i32 = e & 0x800;
+    e &= 0x7ff;
+    if e == 0 {
+        ix = (x * x1p63).to_bits();
+        e = (ix >> 52) as i32 & 0x7ff;
+        e = if e != 0 { e - 63 } else { 0x800 };
+    }
+    ix &= (1 << 52) - 1;
+    ix |= 1 << 52;
+    ix <<= 1;
+    e -= 0x3ff + 52 + 1;
+    Num { m: ix, e, sign }
+}
+
+#[inline]
+fn mul(x: u64, y: u64) -> (u64, u64) {
+    let t1: u64;
+    let t2: u64;
+    let t3: u64;
+    let xlo: u64 = x as u32 as u64;
+    let xhi: u64 = x >> 32;
+    let ylo: u64 = y as u32 as u64;
+    let yhi: u64 = y >> 32;
+
+    t1 = xlo * ylo;
+    t2 = xlo * yhi + xhi * ylo;
+    t3 = xhi * yhi;
+    let lo = t1.wrapping_add(t2 << 32);
+    let hi = t3 + (t2 >> 32) + (t1 > lo) as u64;
+    (hi, lo)
+}
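+
+// Note (not in the upstream source): `mul` is a 64x64 -> 128 bit multiply built
+// from 32x32 -> 64 bit partial products. The mantissas passed in are at most 54
+// bits wide, so the cross-term sum t2 cannot overflow, and the (t1 > lo)
+// comparison recovers the carry lost when t2 << 32 wraps in the low-word
+// addition.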
+
+/// Floating multiply add (f64)
+///
+/// Computes `(x*y)+z`, rounded as one ternary operation:
+/// Computes the value (as if) to infinite precision and rounds once to the result format,
+/// according to the rounding mode characterized by the value of FLT_ROUNDS.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn fma(x: f64, y: f64, z: f64) -> f64 {
+    let x1p63: f64 = f64::from_bits(0x43e0000000000000); // 0x1p63 === 2 ^ 63
+    let x0_ffffff8p_63 = f64::from_bits(0x3bfffffff0000000); // 0x0.ffffff8p-63
+
+    /* normalize so top 10 bits and last bit are 0 */
+    let nx = normalize(x);
+    let ny = normalize(y);
+    let nz = normalize(z);
+
+    if nx.e >= ZEROINFNAN || ny.e >= ZEROINFNAN {
+        return x * y + z;
+    }
+    if nz.e >= ZEROINFNAN {
+        if nz.e > ZEROINFNAN {
+            /* z==0 */
+            return x * y + z;
+        }
+        return z;
+    }
+
+    /* mul: r = x*y */
+    let zhi: u64;
+    let zlo: u64;
+    let (mut rhi, mut rlo) = mul(nx.m, ny.m);
+    /* either top 20 or 21 bits of rhi and last 2 bits of rlo are 0 */
+
+    /* align exponents */
+    let mut e: i32 = nx.e + ny.e;
+    let mut d: i32 = nz.e - e;
+    /* shift bits z<<=kz, r>>=kr, so kz+kr == d, set e = e+kr (== ez-kz) */
+    if d > 0 {
+        if d < 64 {
+            zlo = nz.m << d;
+            zhi = nz.m >> (64 - d);
+        } else {
+            zlo = 0;
+            zhi = nz.m;
+            e = nz.e - 64;
+            d -= 64;
+            if d == 0 {
+            } else if d < 64 {
+                rlo = rhi << (64 - d) | rlo >> d | ((rlo << (64 - d)) != 0) as u64;
+                rhi = rhi >> d;
+            } else {
+                rlo = 1;
+                rhi = 0;
+            }
+        }
+    } else {
+        zhi = 0;
+        d = -d;
+        if d == 0 {
+            zlo = nz.m;
+        } else if d < 64 {
+            zlo = nz.m >> d | ((nz.m << (64 - d)) != 0) as u64;
+        } else {
+            zlo = 1;
+        }
+    }
+
+    /* add */
+    let mut sign: i32 = nx.sign ^ ny.sign;
+    let samesign: bool = (sign ^ nz.sign) == 0;
+    let mut nonzero: i32 = 1;
+    if samesign {
+        /* r += z */
+        rlo = rlo.wrapping_add(zlo);
+        rhi += zhi + (rlo < zlo) as u64;
+    } else {
+        /* r -= z */
+        let t = rlo;
+        rlo -= zlo;
+        rhi = rhi - zhi - (t < rlo) as u64;
+        if (rhi >> 63) != 0 {
+            rlo = (-(rlo as i64)) as u64;
+            rhi = (-(rhi as i64)) as u64 - (rlo != 0) as u64;
+            sign = (sign == 0) as i32;
+        }
+        nonzero = (rhi != 0) as i32;
+    }
+
+    /* set rhi to top 63bit of the result (last bit is sticky) */
+    if nonzero != 0 {
+        e += 64;
+        d = rhi.leading_zeros() as i32 - 1;
+        /* note: d > 0 */
+        rhi = rhi << d | rlo >> (64 - d) | ((rlo << d) != 0) as u64;
+    } else if rlo != 0 {
+        d = rlo.leading_zeros() as i32 - 1;
+        if d < 0 {
+            rhi = rlo >> 1 | (rlo & 1);
+        } else {
+            rhi = rlo << d;
+        }
+    } else {
+        /* exact +-0 */
+        return x * y + z;
+    }
+    e -= d;
+
+    /* convert to double */
+    let mut i: i64 = rhi as i64; /* i is in [1<<62,(1<<63)-1] */
+    if sign != 0 {
+        i = -i;
+    }
+    let mut r: f64 = i as f64; /* |r| is in [0x1p62,0x1p63] */
+
+    if e < -1022 - 62 {
+        /* result is subnormal before rounding */
+        if e == -1022 - 63 {
+            let mut c: f64 = x1p63;
+            if sign != 0 {
+                c = -c;
+            }
+            if r == c {
+                /* min normal after rounding, underflow depends
+                on arch behaviour which can be imitated by
+                a double to float conversion */
+                let fltmin: f32 = (x0_ffffff8p_63 * f32::MIN_POSITIVE as f64 * r) as f32;
+                return f64::MIN_POSITIVE / f32::MIN_POSITIVE as f64 * fltmin as f64;
+            }
+            /* one bit is lost when scaled, add another top bit to
+            only round once at conversion if it is inexact */
+            if (rhi << 53) != 0 {
+                i = (rhi >> 1 | (rhi & 1) | 1 << 62) as i64;
+                if sign != 0 {
+                    i = -i;
+                }
+                r = i as f64;
+                r = 2. * r - c; /* remove top bit */
+
+                /* raise underflow portably, such that it
+                cannot be optimized away */
+                {
+                    let tiny: f64 = f64::MIN_POSITIVE / f32::MIN_POSITIVE as f64 * r;
+                    r += (tiny * tiny) * (r - r);
+                }
+            }
+        } else {
+            /* only round once when scaled */
+            d = 10;
+            i = ((rhi >> d | ((rhi << (64 - d)) != 0) as u64) << d) as i64;
+            if sign != 0 {
+                i = -i;
+            }
+            r = i as f64;
+        }
+    }
+    scalbn(r, e)
+}
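+
+// Minimal check (not in the upstream source): when x*y + z is exactly
+// representable, any correct fma must return it with no rounding at all.
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn sanity_check() {
+        assert_eq!(super::fma(2.0, 3.0, 4.0), 10.0);
+    }
+}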
+
+
\ No newline at end of file diff --git a/src/libm/math/fmaf.rs.html b/src/libm/math/fmaf.rs.html new file mode 100644 index 000000000..bf3f3682a --- /dev/null +++ b/src/libm/math/fmaf.rs.html @@ -0,0 +1,217 @@ +fmaf.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/s_fmaf.c */
+/*-
+ * Copyright (c) 2005-2011 David Schultz <das@FreeBSD.ORG>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+use core::f32;
+use core::ptr::read_volatile;
+
+use super::fenv::{
+    feclearexcept, fegetround, feraiseexcept, fesetround, fetestexcept, FE_INEXACT, FE_TONEAREST,
+    FE_TOWARDZERO, FE_UNDERFLOW,
+};
+
+/*
+ * Fused multiply-add: Compute x * y + z with a single rounding error.
+ *
+ * A double has more than twice as much precision as a float, so
+ * direct double-precision arithmetic suffices, except where double
+ * rounding occurs.
+ */
+
+/// Floating multiply add (f32)
+///
+/// Computes `(x*y)+z`, rounded as one ternary operation:
+/// Computes the value (as if) to infinite precision and rounds once to the result format,
+/// according to the rounding mode characterized by the value of FLT_ROUNDS.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn fmaf(x: f32, y: f32, mut z: f32) -> f32 {
+    let xy: f64;
+    let mut result: f64;
+    let mut ui: u64;
+    let e: i32;
+
+    xy = x as f64 * y as f64;
+    result = xy + z as f64;
+    ui = result.to_bits();
+    e = (ui >> 52) as i32 & 0x7ff;
+    /* Common case: The double precision result is fine. */
+    if (
+        /* not a halfway case */
+        ui & 0x1fffffff) != 0x10000000 ||
+        /* NaN */
+        e == 0x7ff ||
+        /* exact */
+        (result - xy == z as f64 && result - z as f64 == xy) ||
+        /* not round-to-nearest */
+        fegetround() != FE_TONEAREST
+    {
+        /*
+            underflow may not be raised correctly, example:
+            fmaf(0x1p-120f, 0x1p-120f, 0x1p-149f)
+        */
+        if e < 0x3ff - 126 && e >= 0x3ff - 149 && fetestexcept(FE_INEXACT) != 0 {
+            feclearexcept(FE_INEXACT);
+            // prevent `xy + vz` from being CSE'd with `xy + z` above
+            let vz: f32 = unsafe { read_volatile(&z) };
+            result = xy + vz as f64;
+            if fetestexcept(FE_INEXACT) != 0 {
+                feraiseexcept(FE_UNDERFLOW);
+            } else {
+                feraiseexcept(FE_INEXACT);
+            }
+        }
+        z = result as f32;
+        return z;
+    }
+
+    /*
+     * If result is inexact, and exactly halfway between two float values,
+     * we need to adjust the low-order bit in the direction of the error.
+     */
+    fesetround(FE_TOWARDZERO);
+    // prevent `vxy + z` from being CSE'd with `xy + z` above
+    let vxy: f64 = unsafe { read_volatile(&xy) };
+    let mut adjusted_result: f64 = vxy + z as f64;
+    fesetround(FE_TONEAREST);
+    if result == adjusted_result {
+        ui = adjusted_result.to_bits();
+        ui += 1;
+        adjusted_result = f64::from_bits(ui);
+    }
+    z = adjusted_result as f32;
+    z
+}
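+
+// Minimal check (not in the upstream source): exact inputs take the common
+// double-precision path above and round once.
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn sanity_check() {
+        assert_eq!(super::fmaf(2.0, 3.0, 4.0), 10.0);
+    }
+}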
+
+
\ No newline at end of file diff --git a/src/libm/math/fmax.rs.html b/src/libm/math/fmax.rs.html new file mode 100644 index 000000000..b72b6942a --- /dev/null +++ b/src/libm/math/fmax.rs.html @@ -0,0 +1,29 @@ +fmax.rs.html -- source
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn fmax(x: f64, y: f64) -> f64 {
+    // IEEE754 says: maxNum(x, y) is the canonicalized number y if x < y, x if y < x, the
+    // canonicalized number if one operand is a number and the other a quiet NaN. Otherwise it
+    // is either x or y, canonicalized (this means results might differ among implementations).
+    // When either x or y is a signalingNaN, then the result is according to 6.2.
+    //
+    // Since we do not support sNaN in Rust yet, we do not need to handle them.
+    // FIXME(nagisa): due to https://bugs.llvm.org/show_bug.cgi?id=33303 we canonicalize by
+    // multiplying by 1.0. Should switch to the `canonicalize` when it works.
+    (if x.is_nan() || x < y { y } else { x }) * 1.0
+}
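+
+// Minimal checks (not in the upstream source): a quiet NaN in either operand is
+// ignored in favor of the number, per the IEEE 754 maxNum rule cited above. The
+// same pattern applies to fmaxf, fmin, and fminf below.
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn nan_handling() {
+        assert_eq!(super::fmax(core::f64::NAN, 1.0), 1.0);
+        assert_eq!(super::fmax(1.0, core::f64::NAN), 1.0);
+        assert_eq!(super::fmax(1.0, 2.0), 2.0);
+    }
+}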
+
+
\ No newline at end of file diff --git a/src/libm/math/fmaxf.rs.html b/src/libm/math/fmaxf.rs.html new file mode 100644 index 000000000..2e4b5f3c3 --- /dev/null +++ b/src/libm/math/fmaxf.rs.html @@ -0,0 +1,29 @@ +fmaxf.rs.html -- source
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn fmaxf(x: f32, y: f32) -> f32 {
+    // IEEE754 says: maxNum(x, y) is the canonicalized number y if x < y, x if y < x, the
+    // canonicalized number if one operand is a number and the other a quiet NaN. Otherwise it
+    // is either x or y, canonicalized (this means results might differ among implementations).
+    // When either x or y is a signalingNaN, then the result is according to 6.2.
+    //
+    // Since we do not support sNaN in Rust yet, we do not need to handle them.
+    // FIXME(nagisa): due to https://bugs.llvm.org/show_bug.cgi?id=33303 we canonicalize by
+    // multiplying by 1.0. Should switch to the `canonicalize` when it works.
+    (if x.is_nan() || x < y { y } else { x }) * 1.0
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/fmin.rs.html b/src/libm/math/fmin.rs.html new file mode 100644 index 000000000..1f489e6de --- /dev/null +++ b/src/libm/math/fmin.rs.html @@ -0,0 +1,29 @@ +fmin.rs.html -- source
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn fmin(x: f64, y: f64) -> f64 {
+    // IEEE754 says: minNum(x, y) is the canonicalized number x if x < y, y if y < x, the
+    // canonicalized number if one operand is a number and the other a quiet NaN. Otherwise it
+    // is either x or y, canonicalized (this means results might differ among implementations).
+    // When either x or y is a signalingNaN, then the result is according to 6.2.
+    //
+    // Since we do not support sNaN in Rust yet, we do not need to handle them.
+    // FIXME(nagisa): due to https://bugs.llvm.org/show_bug.cgi?id=33303 we canonicalize by
+    // multiplying by 1.0. Should switch to the `canonicalize` when it works.
+    (if y.is_nan() || x < y { x } else { y }) * 1.0
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/fminf.rs.html b/src/libm/math/fminf.rs.html new file mode 100644 index 000000000..4f2097b60 --- /dev/null +++ b/src/libm/math/fminf.rs.html @@ -0,0 +1,29 @@ +fminf.rs.html -- source
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn fminf(x: f32, y: f32) -> f32 {
+    // IEEE754 says: minNum(x, y) is the canonicalized number x if x < y, y if y < x, the
+    // canonicalized number if one operand is a number and the other a quiet NaN. Otherwise it
+    // is either x or y, canonicalized (this means results might differ among implementations).
+    // When either x or y is a signalingNaN, then the result is according to 6.2.
+    //
+    // Since we do not support sNaN in Rust yet, we do not need to handle them.
+    // FIXME(nagisa): due to https://bugs.llvm.org/show_bug.cgi?id=33303 we canonicalize by
+    // multiplying by 1.0. Should switch to the `canonicalize` when it works.
+    (if y.is_nan() || x < y { x } else { y }) * 1.0
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/fmod.rs.html b/src/libm/math/fmod.rs.html new file mode 100644 index 000000000..1616c8173 --- /dev/null +++ b/src/libm/math/fmod.rs.html @@ -0,0 +1,165 @@ +fmod.rs.html -- source
+use core::u64;
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn fmod(x: f64, y: f64) -> f64 {
+    let mut uxi = x.to_bits();
+    let mut uyi = y.to_bits();
+    let mut ex = (uxi >> 52 & 0x7ff) as i64;
+    let mut ey = (uyi >> 52 & 0x7ff) as i64;
+    let sx = uxi >> 63;
+    let mut i;
+
+    if uyi << 1 == 0 || y.is_nan() || ex == 0x7ff {
+        return (x * y) / (x * y);
+    }
+    if uxi << 1 <= uyi << 1 {
+        if uxi << 1 == uyi << 1 {
+            return 0.0 * x;
+        }
+        return x;
+    }
+
+    /* normalize x and y */
+    if ex == 0 {
+        i = uxi << 12;
+        while i >> 63 == 0 {
+            ex -= 1;
+            i <<= 1;
+        }
+        uxi <<= -ex + 1;
+    } else {
+        uxi &= u64::MAX >> 12;
+        uxi |= 1 << 52;
+    }
+    if ey == 0 {
+        i = uyi << 12;
+        while i >> 63 == 0 {
+            ey -= 1;
+            i <<= 1;
+        }
+        uyi <<= -ey + 1;
+    } else {
+        uyi &= u64::MAX >> 12;
+        uyi |= 1 << 52;
+    }
+
+    /* x mod y */
+    while ex > ey {
+        i = uxi.wrapping_sub(uyi);
+        if i >> 63 == 0 {
+            if i == 0 {
+                return 0.0 * x;
+            }
+            uxi = i;
+        }
+        uxi <<= 1;
+        ex -= 1;
+    }
+    i = uxi.wrapping_sub(uyi);
+    if i >> 63 == 0 {
+        if i == 0 {
+            return 0.0 * x;
+        }
+        uxi = i;
+    }
+    while uxi >> 52 == 0 {
+        uxi <<= 1;
+        ex -= 1;
+    }
+
+    /* scale result */
+    if ex > 0 {
+        uxi -= 1 << 52;
+        uxi |= (ex as u64) << 52;
+    } else {
+        uxi >>= -ex + 1;
+    }
+    uxi |= (sx as u64) << 63;
+
+    f64::from_bits(uxi)
+}
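+
+// Minimal checks (not in the upstream source): these cases are exact, and the
+// sign of the result follows x.
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn sanity_check() {
+        assert_eq!(super::fmod(5.5, 2.0), 1.5);
+        assert_eq!(super::fmod(-5.5, 2.0), -1.5);
+    }
+}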
+
+
\ No newline at end of file diff --git a/src/libm/math/fmodf.rs.html b/src/libm/math/fmodf.rs.html new file mode 100644 index 000000000..220a7cf30 --- /dev/null +++ b/src/libm/math/fmodf.rs.html @@ -0,0 +1,183 @@ +fmodf.rs.html -- source
+use core::f32;
+use core::u32;
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn fmodf(x: f32, y: f32) -> f32 {
+    let mut uxi = x.to_bits();
+    let mut uyi = y.to_bits();
+    let mut ex = (uxi >> 23 & 0xff) as i32;
+    let mut ey = (uyi >> 23 & 0xff) as i32;
+    let sx = uxi & 0x80000000;
+    let mut i;
+
+    if uyi << 1 == 0 || y.is_nan() || ex == 0xff {
+        return (x * y) / (x * y);
+    }
+
+    if uxi << 1 <= uyi << 1 {
+        if uxi << 1 == uyi << 1 {
+            return 0.0 * x;
+        }
+
+        return x;
+    }
+
+    /* normalize x and y */
+    if ex == 0 {
+        i = uxi << 9;
+        while i >> 31 == 0 {
+            ex -= 1;
+            i <<= 1;
+        }
+
+        uxi <<= -ex + 1;
+    } else {
+        uxi &= u32::MAX >> 9;
+        uxi |= 1 << 23;
+    }
+
+    if ey == 0 {
+        i = uyi << 9;
+        while i >> 31 == 0 {
+            ey -= 1;
+            i <<= 1;
+        }
+
+        uyi <<= -ey + 1;
+    } else {
+        uyi &= u32::MAX >> 9;
+        uyi |= 1 << 23;
+    }
+
+    /* x mod y */
+    while ex > ey {
+        i = uxi.wrapping_sub(uyi);
+        if i >> 31 == 0 {
+            if i == 0 {
+                return 0.0 * x;
+            }
+            uxi = i;
+        }
+        uxi <<= 1;
+
+        ex -= 1;
+    }
+
+    i = uxi.wrapping_sub(uyi);
+    if i >> 31 == 0 {
+        if i == 0 {
+            return 0.0 * x;
+        }
+        uxi = i;
+    }
+
+    while uxi >> 23 == 0 {
+        uxi <<= 1;
+        ex -= 1;
+    }
+
+    /* scale result up */
+    if ex > 0 {
+        uxi -= 1 << 23;
+        uxi |= (ex as u32) << 23;
+    } else {
+        uxi >>= -ex + 1;
+    }
+    uxi |= sx;
+
+    f32::from_bits(uxi)
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/frexp.rs.html b/src/libm/math/frexp.rs.html new file mode 100644 index 000000000..3cb838d77 --- /dev/null +++ b/src/libm/math/frexp.rs.html @@ -0,0 +1,43 @@ +frexp.rs.html -- source
+pub fn frexp(x: f64) -> (f64, i32) {
+    let mut y = x.to_bits();
+    let ee = ((y >> 52) & 0x7ff) as i32;
+
+    if ee == 0 {
+        if x != 0.0 {
+            let x1p64 = f64::from_bits(0x43f0000000000000);
+            let (x, e) = frexp(x * x1p64);
+            return (x, e - 64);
+        }
+        return (x, 0);
+    } else if ee == 0x7ff {
+        return (x, 0);
+    }
+
+    let e = ee - 0x3fe;
+    y &= 0x800fffffffffffff;
+    y |= 0x3fe0000000000000;
+    (f64::from_bits(y), e)
+}
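+
+// Minimal checks (not in the upstream source): the mantissa is normalized into
+// [0.5, 1) so that x == m * 2^e exactly.
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn sanity_check() {
+        assert_eq!(super::frexp(8.0), (0.5, 4));
+        assert_eq!(super::frexp(0.0), (0.0, 0));
+    }
+}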
+
+
\ No newline at end of file diff --git a/src/libm/math/frexpf.rs.html b/src/libm/math/frexpf.rs.html new file mode 100644 index 000000000..47fff31b7 --- /dev/null +++ b/src/libm/math/frexpf.rs.html @@ -0,0 +1,45 @@ +frexpf.rs.html -- source
+pub fn frexpf(x: f32) -> (f32, i32) {
+    let mut y = x.to_bits();
+    let ee: i32 = ((y >> 23) & 0xff) as i32;
+
+    if ee == 0 {
+        if x != 0.0 {
+            let x1p64 = f32::from_bits(0x5f800000);
+            let (x, e) = frexpf(x * x1p64);
+            return (x, e - 64);
+        } else {
+            return (x, 0);
+        }
+    } else if ee == 0xff {
+        return (x, 0);
+    }
+
+    let e = ee - 0x7e;
+    y &= 0x807fffff;
+    y |= 0x3f000000;
+    (f32::from_bits(y), e)
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/hypot.rs.html b/src/libm/math/hypot.rs.html new file mode 100644 index 000000000..8770ab488 --- /dev/null +++ b/src/libm/math/hypot.rs.html @@ -0,0 +1,155 @@ +hypot.rs.html -- source
+use core::f64;
+
+use super::sqrt;
+
+const SPLIT: f64 = 134217728. + 1.; // 0x1p27 + 1 === (2 ^ 27) + 1
+
+#[inline]
+fn sq(x: f64) -> (f64, f64) {
+    let xh: f64;
+    let xl: f64;
+    let xc: f64;
+
+    xc = x * SPLIT;
+    xh = x - xc + xc;
+    xl = x - xh;
+    let hi = x * x;
+    let lo = xh * xh - hi + 2. * xh * xl + xl * xl;
+    (hi, lo)
+}
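+
+// Note (not in the upstream source): `sq` is a Dekker-style exact squaring.
+// Multiplying by SPLIT and cancelling rounds x to a 26-bit head xh; then xh*xh,
+// 2*xh*xl and xl*xl are all exact in f64, and hi + lo equals x*x with no
+// rounding error.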
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn hypot(mut x: f64, mut y: f64) -> f64 {
+    let x1p700 = f64::from_bits(0x6bb0000000000000); // 0x1p700 === 2 ^ 700
+    let x1p_700 = f64::from_bits(0x1430000000000000); // 0x1p-700 === 2 ^ -700
+
+    let mut uxi = x.to_bits();
+    let mut uyi = y.to_bits();
+    let uti;
+    let ex: i64;
+    let ey: i64;
+    let mut z: f64;
+
+    /* arrange |x| >= |y| */
+    uxi &= -1i64 as u64 >> 1;
+    uyi &= -1i64 as u64 >> 1;
+    if uxi < uyi {
+        uti = uxi;
+        uxi = uyi;
+        uyi = uti;
+    }
+
+    /* special cases */
+    ex = (uxi >> 52) as i64;
+    ey = (uyi >> 52) as i64;
+    x = f64::from_bits(uxi);
+    y = f64::from_bits(uyi);
+    /* note: hypot(inf,nan) == inf */
+    if ey == 0x7ff {
+        return y;
+    }
+    if ex == 0x7ff || uyi == 0 {
+        return x;
+    }
+    /* note: hypot(x,y) ~= x + y*y/x/2 with inexact for small y/x */
+    /* 64 difference is enough for ld80 double_t */
+    if ex - ey > 64 {
+        return x + y;
+    }
+
+    /* precise sqrt argument in nearest rounding mode without overflow */
+    /* xh*xh must not overflow and xl*xl must not underflow in sq */
+    z = 1.;
+    if ex > 0x3ff + 510 {
+        z = x1p700;
+        x *= x1p_700;
+        y *= x1p_700;
+    } else if ey < 0x3ff - 450 {
+        z = x1p_700;
+        x *= x1p700;
+        y *= x1p700;
+    }
+    let (hx, lx) = sq(x);
+    let (hy, ly) = sq(y);
+    z * sqrt(ly + lx + hy + hx)
+}
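+
+// Minimal checks (not in the upstream source): a Pythagorean triple is exact,
+// and hypot(inf, nan) == inf per the note above.
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn sanity_check() {
+        assert_eq!(super::hypot(3.0, 4.0), 5.0);
+        assert_eq!(super::hypot(core::f64::INFINITY, core::f64::NAN), core::f64::INFINITY);
+    }
+}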
+
+
\ No newline at end of file diff --git a/src/libm/math/hypotf.rs.html b/src/libm/math/hypotf.rs.html new file mode 100644 index 000000000..0ae67a5e1 --- /dev/null +++ b/src/libm/math/hypotf.rs.html @@ -0,0 +1,91 @@ +hypotf.rs.html -- source
+use core::f32;
+
+use super::sqrtf;
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn hypotf(mut x: f32, mut y: f32) -> f32 {
+    let x1p90 = f32::from_bits(0x6c800000); // 0x1p90f === 2 ^ 90
+    let x1p_90 = f32::from_bits(0x12800000); // 0x1p-90f === 2 ^ -90
+
+    let mut uxi = x.to_bits();
+    let mut uyi = y.to_bits();
+    let uti;
+    let mut z: f32;
+
+    uxi &= -1i32 as u32 >> 1;
+    uyi &= -1i32 as u32 >> 1;
+    if uxi < uyi {
+        uti = uxi;
+        uxi = uyi;
+        uyi = uti;
+    }
+
+    x = f32::from_bits(uxi);
+    y = f32::from_bits(uyi);
+    if uyi == 0xff << 23 {
+        return y;
+    }
+    if uxi >= 0xff << 23 || uyi == 0 || uxi - uyi >= 25 << 23 {
+        return x + y;
+    }
+
+    z = 1.;
+    if uxi >= (0x7f + 60) << 23 {
+        z = x1p90;
+        x *= x1p_90;
+        y *= x1p_90;
+    } else if uyi < (0x7f - 60) << 23 {
+        z = x1p_90;
+        x *= x1p90;
+        y *= x1p90;
+    }
+    z * sqrtf((x as f64 * x as f64 + y as f64 * y as f64) as f32)
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/ilogb.rs.html b/src/libm/math/ilogb.rs.html new file mode 100644 index 000000000..9e63f300b --- /dev/null +++ b/src/libm/math/ilogb.rs.html @@ -0,0 +1,65 @@ +ilogb.rs.html -- source
+const FP_ILOGBNAN: i32 = -1 - 0x7fffffff;
+const FP_ILOGB0: i32 = FP_ILOGBNAN;
+
+pub fn ilogb(x: f64) -> i32 {
+    let mut i: u64 = x.to_bits();
+    let e = ((i >> 52) & 0x7ff) as i32;
+
+    if e == 0 {
+        i <<= 12;
+        if i == 0 {
+            force_eval!(0.0 / 0.0);
+            return FP_ILOGB0;
+        }
+        /* subnormal x */
+        let mut e = -0x3ff;
+        while (i >> 63) == 0 {
+            e -= 1;
+            i <<= 1;
+        }
+        e
+    } else if e == 0x7ff {
+        force_eval!(0.0 / 0.0);
+        if (i << 12) != 0 {
+            FP_ILOGBNAN
+        } else {
+            i32::max_value()
+        }
+    } else {
+        e - 0x3ff
+    }
+}
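+
+// Minimal checks (not in the upstream source): ilogb extracts the unbiased
+// binary exponent.
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn sanity_check() {
+        assert_eq!(super::ilogb(32.0), 5);
+        assert_eq!(super::ilogb(0.9), -1);
+    }
+}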
+
+
\ No newline at end of file diff --git a/src/libm/math/ilogbf.rs.html b/src/libm/math/ilogbf.rs.html new file mode 100644 index 000000000..a229ec62a --- /dev/null +++ b/src/libm/math/ilogbf.rs.html @@ -0,0 +1,65 @@ +ilogbf.rs.html -- source
+const FP_ILOGBNAN: i32 = -1 - 0x7fffffff;
+const FP_ILOGB0: i32 = FP_ILOGBNAN;
+
+pub fn ilogbf(x: f32) -> i32 {
+    let mut i = x.to_bits();
+    let e = ((i >> 23) & 0xff) as i32;
+
+    if e == 0 {
+        i <<= 9;
+        if i == 0 {
+            force_eval!(0.0 / 0.0);
+            return FP_ILOGB0;
+        }
+        /* subnormal x */
+        let mut e = -0x7f;
+        while (i >> 31) == 0 {
+            e -= 1;
+            i <<= 1;
+        }
+        e
+    } else if e == 0xff {
+        force_eval!(0.0 / 0.0);
+        if (i << 9) != 0 {
+            FP_ILOGBNAN
+        } else {
+            i32::max_value()
+        }
+    } else {
+        e - 0x7f
+    }
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/j0.rs.html b/src/libm/math/j0.rs.html new file mode 100644 index 000000000..ab118e989 --- /dev/null +++ b/src/libm/math/j0.rs.html @@ -0,0 +1,847 @@ +j0.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/e_j0.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/* j0(x), y0(x)
+ * Bessel function of the first and second kinds of order zero.
+ * Method -- j0(x):
+ *      1. For tiny x, we use j0(x) = 1 - x^2/4 + x^4/64 - ...
+ *      2. Reduce x to |x| since j0(x)=j0(-x),  and
+ *         for x in (0,2)
+ *              j0(x) = 1-z/4+ z^2*R0/S0,  where z = x*x;
+ *         (precision:  |j0-1+z/4-z^2R0/S0 |<2**-63.67 )
+ *         for x in (2,inf)
+ *              j0(x) = sqrt(2/(pi*x))*(p0(x)*cos(x0)-q0(x)*sin(x0))
+ *         where x0 = x-pi/4. It is better to compute sin(x0),cos(x0)
+ *         as follows:
+ *              cos(x0) = cos(x)cos(pi/4)+sin(x)sin(pi/4)
+ *                      = 1/sqrt(2) * (cos(x) + sin(x))
+ *              sin(x0) = sin(x)cos(pi/4)-cos(x)sin(pi/4)
+ *                      = 1/sqrt(2) * (sin(x) - cos(x))
+ *         (To avoid cancellation, use
+ *              sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
+ *          to compute the worse one.)
+ *
+ *      3. Special cases
+ *              j0(nan)= nan
+ *              j0(0) = 1
+ *              j0(inf) = 0
+ *
+ * Method -- y0(x):
+ *      1. For x<2.
+ *         Since
+ *              y0(x) = 2/pi*(j0(x)*(ln(x/2)+Euler) + x^2/4 - ...)
+ *         therefore y0(x)-2/pi*j0(x)*ln(x) is an even function.
+ *         We use the following function to approximate y0,
+ *              y0(x) = U(z)/V(z) + (2/pi)*(j0(x)*ln(x)), z= x^2
+ *         where
+ *              U(z) = u00 + u01*z + ... + u06*z^6
+ *              V(z) = 1  + v01*z + ... + v04*z^4
+ *         with absolute approximation error bounded by 2**-72.
+ *         Note: For tiny x, U/V = u0 and j0(x)~1, hence
+ *              y0(tiny) = u0 + (2/pi)*ln(tiny), (choose tiny<2**-27)
+ *      2. For x>=2.
+ *              y0(x) = sqrt(2/(pi*x))*(p0(x)*cos(x0)+q0(x)*sin(x0))
+ *         where x0 = x-pi/4. It is better to compute sin(x0),cos(x0)
+ *         by the method mentioned above.
+ *      3. Special cases: y0(0)=-inf, y0(x<0)=NaN, y0(inf)=0.
+ */
+
+use super::{cos, fabs, get_high_word, get_low_word, log, sin, sqrt};
+const INVSQRTPI: f64 = 5.64189583547756279280e-01; /* 0x3FE20DD7, 0x50429B6D */
+const TPI: f64 = 6.36619772367581382433e-01; /* 0x3FE45F30, 0x6DC9C883 */
+
+/* common method when |x|>=2 */
+fn common(ix: u32, x: f64, y0: bool) -> f64 {
+    let s: f64;
+    let mut c: f64;
+    let mut ss: f64;
+    let mut cc: f64;
+    let z: f64;
+
+    /*
+     * j0(x) = sqrt(2/(pi*x))*(p0(x)*cos(x-pi/4)-q0(x)*sin(x-pi/4))
+     * y0(x) = sqrt(2/(pi*x))*(p0(x)*sin(x-pi/4)+q0(x)*cos(x-pi/4))
+     *
+     * sin(x-pi/4) = (sin(x) - cos(x))/sqrt(2)
+     * cos(x-pi/4) = (sin(x) + cos(x))/sqrt(2)
+     * sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
+     */
+    s = sin(x);
+    c = cos(x);
+    if y0 {
+        c = -c;
+    }
+    cc = s + c;
+    /* avoid overflow in 2*x, big ulp error when x>=0x1p1023 */
+    if ix < 0x7fe00000 {
+        ss = s - c;
+        z = -cos(2.0 * x);
+        if s * c < 0.0 {
+            cc = z / ss;
+        } else {
+            ss = z / cc;
+        }
+        if ix < 0x48000000 {
+            if y0 {
+                ss = -ss;
+            }
+            cc = pzero(x) * cc - qzero(x) * ss;
+        }
+    }
+    return INVSQRTPI * cc / sqrt(x);
+}
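+// Hedged numeric check (not in the upstream source): the substitution
+// above relies on (sin x + cos x)(sin x - cos x) = sin^2 x - cos^2 x
+// = -cos(2x), so whichever of s+c or s-c suffers cancellation can be
+// recovered from the other as -cos(2x) divided by it.
+#[allow(dead_code)]
+fn _cancellation_identity_sketch() {
+    let x = 2.5;
+    let (s, c) = (sin(x), cos(x));
+    assert!(fabs((s + c) - (-cos(2.0 * x) / (s - c))) < 1.0e-12);
+}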
+
+/* R0/S0 on [0, 2.00] */
+const R02: f64 = 1.56249999999999947958e-02; /* 0x3F8FFFFF, 0xFFFFFFFD */
+const R03: f64 = -1.89979294238854721751e-04; /* 0xBF28E6A5, 0xB61AC6E9 */
+const R04: f64 = 1.82954049532700665670e-06; /* 0x3EBEB1D1, 0x0C503919 */
+const R05: f64 = -4.61832688532103189199e-09; /* 0xBE33D5E7, 0x73D63FCE */
+const S01: f64 = 1.56191029464890010492e-02; /* 0x3F8FFCE8, 0x82C8C2A4 */
+const S02: f64 = 1.16926784663337450260e-04; /* 0x3F1EA6D2, 0xDD57DBF4 */
+const S03: f64 = 5.13546550207318111446e-07; /* 0x3EA13B54, 0xCE84D5A9 */
+const S04: f64 = 1.16614003333790000205e-09; /* 0x3E1408BC, 0xF4745D8F */
+
+pub fn j0(mut x: f64) -> f64 {
+    let z: f64;
+    let r: f64;
+    let s: f64;
+    let mut ix: u32;
+
+    ix = get_high_word(x);
+    ix &= 0x7fffffff;
+
+    /* j0(+-inf)=0, j0(nan)=nan */
+    if ix >= 0x7ff00000 {
+        return 1.0 / (x * x);
+    }
+    x = fabs(x);
+
+    if ix >= 0x40000000 {
+        /* |x| >= 2 */
+        /* large ulp error near zeros: 2.4, 5.52, 8.6537,.. */
+        return common(ix, x, false);
+    }
+
+    /* 1 - x*x/4 + x*x*R(x^2)/S(x^2) */
+    if ix >= 0x3f200000 {
+        /* |x| >= 2**-13 */
+        /* up to 4ulp error close to 2 */
+        z = x * x;
+        r = z * (R02 + z * (R03 + z * (R04 + z * R05)));
+        s = 1.0 + z * (S01 + z * (S02 + z * (S03 + z * S04)));
+        return (1.0 + x / 2.0) * (1.0 - x / 2.0) + z * (r / s);
+    }
+
+    /* 1 - x*x/4 */
+    /* prevent underflow */
+    /* inexact should be raised when x!=0, this is not done correctly */
+    if ix >= 0x38000000 {
+        /* |x| >= 2**-127 */
+        x = 0.25 * x * x;
+    }
+    return 1.0 - x;
+}
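+// Hedged sketch (not in the upstream source): for small |x| the series
+// j0(x) = 1 - x^2/4 + x^4/64 - ... is truncated after the quadratic
+// term, so at x = 1e-3 the truncation error is about x^4/64 ~= 1.6e-14.
+#[allow(dead_code)]
+fn _j0_small_x_sketch() {
+    let x = 1.0e-3;
+    assert!(fabs(j0(x) - (1.0 - x * x / 4.0)) < 1.0e-12);
+}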
+
+const U00: f64 = -7.38042951086872317523e-02; /* 0xBFB2E4D6, 0x99CBD01F */
+const U01: f64 = 1.76666452509181115538e-01; /* 0x3FC69D01, 0x9DE9E3FC */
+const U02: f64 = -1.38185671945596898896e-02; /* 0xBF8C4CE8, 0xB16CFA97 */
+const U03: f64 = 3.47453432093683650238e-04; /* 0x3F36C54D, 0x20B29B6B */
+const U04: f64 = -3.81407053724364161125e-06; /* 0xBECFFEA7, 0x73D25CAD */
+const U05: f64 = 1.95590137035022920206e-08; /* 0x3E550057, 0x3B4EABD4 */
+const U06: f64 = -3.98205194132103398453e-11; /* 0xBDC5E43D, 0x693FB3C8 */
+const V01: f64 = 1.27304834834123699328e-02; /* 0x3F8A1270, 0x91C9C71A */
+const V02: f64 = 7.60068627350353253702e-05; /* 0x3F13ECBB, 0xF578C6C1 */
+const V03: f64 = 2.59150851840457805467e-07; /* 0x3E91642D, 0x7FF202FD */
+const V04: f64 = 4.41110311332675467403e-10; /* 0x3DFE5018, 0x3BD6D9EF */
+
+pub fn y0(x: f64) -> f64 {
+    let z: f64;
+    let u: f64;
+    let v: f64;
+    let ix: u32;
+    let lx: u32;
+
+    ix = get_high_word(x);
+    lx = get_low_word(x);
+
+    /* y0(nan)=nan, y0(<0)=nan, y0(0)=-inf, y0(inf)=0 */
+    if ((ix << 1) | lx) == 0 {
+        return -1.0 / 0.0;
+    }
+    if (ix >> 31) != 0 {
+        return 0.0 / 0.0;
+    }
+    if ix >= 0x7ff00000 {
+        return 1.0 / x;
+    }
+
+    if ix >= 0x40000000 {
+        /* x >= 2 */
+        /* large ulp errors near zeros: 3.958, 7.086,.. */
+        return common(ix, x, true);
+    }
+
+    /* U(x^2)/V(x^2) + (2/pi)*j0(x)*log(x) */
+    if ix >= 0x3e400000 {
+        /* x >= 2**-27 */
+        /* large ulp error near the first zero, x ~= 0.89 */
+        z = x * x;
+        u = U00 + z * (U01 + z * (U02 + z * (U03 + z * (U04 + z * (U05 + z * U06)))));
+        v = 1.0 + z * (V01 + z * (V02 + z * (V03 + z * V04)));
+        return u / v + TPI * (j0(x) * log(x));
+    }
+    return U00 + TPI * log(x);
+}
+
+/* The asymptotic expansion of pzero is
+ *      1 - 9/128 s^2 + 11025/98304 s^4 - ...,  where s = 1/x.
+ * For x >= 2, we approximate pzero by
+ *      pzero(x) = 1 + (R/S)
+ * where  R = pR0 + pR1*s^2 + pR2*s^4 + ... + pR5*s^10
+ *        S = 1 + pS0*s^2 + ... + pS4*s^10
+ * and
+ *      | pzero(x)-1-R/S | <= 2  ** ( -60.26)
+ */
+const PR8: [f64; 6] = [
+    /* for x in [inf, 8]=1/[0,0.125] */
+    0.00000000000000000000e+00,  /* 0x00000000, 0x00000000 */
+    -7.03124999999900357484e-02, /* 0xBFB1FFFF, 0xFFFFFD32 */
+    -8.08167041275349795626e+00, /* 0xC02029D0, 0xB44FA779 */
+    -2.57063105679704847262e+02, /* 0xC0701102, 0x7B19E863 */
+    -2.48521641009428822144e+03, /* 0xC0A36A6E, 0xCD4DCAFC */
+    -5.25304380490729545272e+03, /* 0xC0B4850B, 0x36CC643D */
+];
+const PS8: [f64; 5] = [
+    1.16534364619668181717e+02, /* 0x405D2233, 0x07A96751 */
+    3.83374475364121826715e+03, /* 0x40ADF37D, 0x50596938 */
+    4.05978572648472545552e+04, /* 0x40E3D2BB, 0x6EB6B05F */
+    1.16752972564375915681e+05, /* 0x40FC810F, 0x8F9FA9BD */
+    4.76277284146730962675e+04, /* 0x40E74177, 0x4F2C49DC */
+];
+
+const PR5: [f64; 6] = [
+    /* for x in [8,4.5454]=1/[0.125,0.22001] */
+    -1.14125464691894502584e-11, /* 0xBDA918B1, 0x47E495CC */
+    -7.03124940873599280078e-02, /* 0xBFB1FFFF, 0xE69AFBC6 */
+    -4.15961064470587782438e+00, /* 0xC010A370, 0xF90C6BBF */
+    -6.76747652265167261021e+01, /* 0xC050EB2F, 0x5A7D1783 */
+    -3.31231299649172967747e+02, /* 0xC074B3B3, 0x6742CC63 */
+    -3.46433388365604912451e+02, /* 0xC075A6EF, 0x28A38BD7 */
+];
+const PS5: [f64; 5] = [
+    6.07539382692300335975e+01, /* 0x404E6081, 0x0C98C5DE */
+    1.05125230595704579173e+03, /* 0x40906D02, 0x5C7E2864 */
+    5.97897094333855784498e+03, /* 0x40B75AF8, 0x8FBE1D60 */
+    9.62544514357774460223e+03, /* 0x40C2CCB8, 0xFA76FA38 */
+    2.40605815922939109441e+03, /* 0x40A2CC1D, 0xC70BE864 */
+];
+
+const PR3: [f64; 6] = [
+    /* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
+    -2.54704601771951915620e-09, /* 0xBE25E103, 0x6FE1AA86 */
+    -7.03119616381481654654e-02, /* 0xBFB1FFF6, 0xF7C0E24B */
+    -2.40903221549529611423e+00, /* 0xC00345B2, 0xAEA48074 */
+    -2.19659774734883086467e+01, /* 0xC035F74A, 0x4CB94E14 */
+    -5.80791704701737572236e+01, /* 0xC04D0A22, 0x420A1A45 */
+    -3.14479470594888503854e+01, /* 0xC03F72AC, 0xA892D80F */
+];
+const PS3: [f64; 5] = [
+    3.58560338055209726349e+01, /* 0x4041ED92, 0x84077DD3 */
+    3.61513983050303863820e+02, /* 0x40769839, 0x464A7C0E */
+    1.19360783792111533330e+03, /* 0x4092A66E, 0x6D1061D6 */
+    1.12799679856907414432e+03, /* 0x40919FFC, 0xB8C39B7E */
+    1.73580930813335754692e+02, /* 0x4065B296, 0xFC379081 */
+];
+
+const PR2: [f64; 6] = [
+    /* for x in [2.8570,2]=1/[0.3499,0.5] */
+    -8.87534333032526411254e-08, /* 0xBE77D316, 0xE927026D */
+    -7.03030995483624743247e-02, /* 0xBFB1FF62, 0x495E1E42 */
+    -1.45073846780952986357e+00, /* 0xBFF73639, 0x8A24A843 */
+    -7.63569613823527770791e+00, /* 0xC01E8AF3, 0xEDAFA7F3 */
+    -1.11931668860356747786e+01, /* 0xC02662E6, 0xC5246303 */
+    -3.23364579351335335033e+00, /* 0xC009DE81, 0xAF8FE70F */
+];
+const PS2: [f64; 5] = [
+    2.22202997532088808441e+01, /* 0x40363865, 0x908B5959 */
+    1.36206794218215208048e+02, /* 0x4061069E, 0x0EE8878F */
+    2.70470278658083486789e+02, /* 0x4070E786, 0x42EA079B */
+    1.53875394208320329881e+02, /* 0x40633C03, 0x3AB6FAFF */
+    1.46576176948256193810e+01, /* 0x402D50B3, 0x44391809 */
+];
+
+fn pzero(x: f64) -> f64 {
+    let p: &[f64; 6];
+    let q: &[f64; 5];
+    let z: f64;
+    let r: f64;
+    let s: f64;
+    let mut ix: u32;
+
+    ix = get_high_word(x);
+    ix &= 0x7fffffff;
+    if ix >= 0x40200000 {
+        p = &PR8;
+        q = &PS8;
+    } else if ix >= 0x40122E8B {
+        p = &PR5;
+        q = &PS5;
+    } else if ix >= 0x4006DB6D {
+        p = &PR3;
+        q = &PS3;
+    } else
+    /*ix >= 0x40000000*/
+    {
+        p = &PR2;
+        q = &PS2;
+    }
+    z = 1.0 / (x * x);
+    r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
+    s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * q[4]))));
+    return 1.0 + r / s;
+}
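+// Hedged sketch (not in the upstream source): the branch thresholds are
+// the high 32 bits of the f64 interval endpoints quoted in the table
+// comments, e.g. 0x40200000 is the high word of 8.0, and 0x40122E8B /
+// 0x4006DB6D sit at roughly 4.5454 and 2.8571.
+#[allow(dead_code)]
+fn _pzero_threshold_sketch() {
+    assert_eq!(get_high_word(8.0), 0x40200000);
+    assert!(fabs(f64::from_bits(0x40122E8Bu64 << 32) - 4.5454) < 1.0e-3);
+    assert!(fabs(f64::from_bits(0x4006DB6Du64 << 32) - 2.8571) < 1.0e-3);
+}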
+
+/* For x >= 8, the asymptotic expansion of qzero is
+ *      -1/8 s + 75/1024 s^3 - ..., where s = 1/x.
+ * We approximate qzero by
+ *      qzero(x) = s*(-0.125 + (R/S))
+ * where  R = qR0 + qR1*s^2 + qR2*s^4 + ... + qR5*s^10
+ *        S = 1 + qS0*s^2 + ... + qS5*s^12
+ * and
+ *      | qzero(x)/s + 0.125 - R/S | <= 2 ** (-61.22)
+ */
+const QR8: [f64; 6] = [
+    /* for x in [inf, 8]=1/[0,0.125] */
+    0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
+    7.32421874999935051953e-02, /* 0x3FB2BFFF, 0xFFFFFE2C */
+    1.17682064682252693899e+01, /* 0x40278952, 0x5BB334D6 */
+    5.57673380256401856059e+02, /* 0x40816D63, 0x15301825 */
+    8.85919720756468632317e+03, /* 0x40C14D99, 0x3E18F46D */
+    3.70146267776887834771e+04, /* 0x40E212D4, 0x0E901566 */
+];
+const QS8: [f64; 6] = [
+    1.63776026895689824414e+02,  /* 0x406478D5, 0x365B39BC */
+    8.09834494656449805916e+03,  /* 0x40BFA258, 0x4E6B0563 */
+    1.42538291419120476348e+05,  /* 0x41016652, 0x54D38C3F */
+    8.03309257119514397345e+05,  /* 0x412883DA, 0x83A52B43 */
+    8.40501579819060512818e+05,  /* 0x4129A66B, 0x28DE0B3D */
+    -3.43899293537866615225e+05, /* 0xC114FD6D, 0x2C9530C5 */
+];
+
+const QR5: [f64; 6] = [
+    /* for x in [8,4.5454]=1/[0.125,0.22001] */
+    1.84085963594515531381e-11, /* 0x3DB43D8F, 0x29CC8CD9 */
+    7.32421766612684765896e-02, /* 0x3FB2BFFF, 0xD172B04C */
+    5.83563508962056953777e+00, /* 0x401757B0, 0xB9953DD3 */
+    1.35111577286449829671e+02, /* 0x4060E392, 0x0A8788E9 */
+    1.02724376596164097464e+03, /* 0x40900CF9, 0x9DC8C481 */
+    1.98997785864605384631e+03, /* 0x409F17E9, 0x53C6E3A6 */
+];
+const QS5: [f64; 6] = [
+    8.27766102236537761883e+01,  /* 0x4054B1B3, 0xFB5E1543 */
+    2.07781416421392987104e+03,  /* 0x40A03BA0, 0xDA21C0CE */
+    1.88472887785718085070e+04,  /* 0x40D267D2, 0x7B591E6D */
+    5.67511122894947329769e+04,  /* 0x40EBB5E3, 0x97E02372 */
+    3.59767538425114471465e+04,  /* 0x40E19118, 0x1F7A54A0 */
+    -5.35434275601944773371e+03, /* 0xC0B4EA57, 0xBEDBC609 */
+];
+
+const QR3: [f64; 6] = [
+    /* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
+    4.37741014089738620906e-09, /* 0x3E32CD03, 0x6ADECB82 */
+    7.32411180042911447163e-02, /* 0x3FB2BFEE, 0x0E8D0842 */
+    3.34423137516170720929e+00, /* 0x400AC0FC, 0x61149CF5 */
+    4.26218440745412650017e+01, /* 0x40454F98, 0x962DAEDD */
+    1.70808091340565596283e+02, /* 0x406559DB, 0xE25EFD1F */
+    1.66733948696651168575e+02, /* 0x4064D77C, 0x81FA21E0 */
+];
+const QS3: [f64; 6] = [
+    4.87588729724587182091e+01,  /* 0x40486122, 0xBFE343A6 */
+    7.09689221056606015736e+02,  /* 0x40862D83, 0x86544EB3 */
+    3.70414822620111362994e+03,  /* 0x40ACF04B, 0xE44DFC63 */
+    6.46042516752568917582e+03,  /* 0x40B93C6C, 0xD7C76A28 */
+    2.51633368920368957333e+03,  /* 0x40A3A8AA, 0xD94FB1C0 */
+    -1.49247451836156386662e+02, /* 0xC062A7EB, 0x201CF40F */
+];
+
+const QR2: [f64; 6] = [
+    /* for x in [2.8570,2]=1/[0.3499,0.5] */
+    1.50444444886983272379e-07, /* 0x3E84313B, 0x54F76BDB */
+    7.32234265963079278272e-02, /* 0x3FB2BEC5, 0x3E883E34 */
+    1.99819174093815998816e+00, /* 0x3FFFF897, 0xE727779C */
+    1.44956029347885735348e+01, /* 0x402CFDBF, 0xAAF96FE5 */
+    3.16662317504781540833e+01, /* 0x403FAA8E, 0x29FBDC4A */
+    1.62527075710929267416e+01, /* 0x403040B1, 0x71814BB4 */
+];
+const QS2: [f64; 6] = [
+    3.03655848355219184498e+01,  /* 0x403E5D96, 0xF7C07AED */
+    2.69348118608049844624e+02,  /* 0x4070D591, 0xE4D14B40 */
+    8.44783757595320139444e+02,  /* 0x408A6645, 0x22B3BF22 */
+    8.82935845112488550512e+02,  /* 0x408B977C, 0x9C5CC214 */
+    2.12666388511798828631e+02,  /* 0x406A9553, 0x0E001365 */
+    -5.31095493882666946917e+00, /* 0xC0153E6A, 0xF8B32931 */
+];
+
+fn qzero(x: f64) -> f64 {
+    let p: &[f64; 6];
+    let q: &[f64; 6];
+    let s: f64;
+    let r: f64;
+    let z: f64;
+    let mut ix: u32;
+
+    ix = get_high_word(x);
+    ix &= 0x7fffffff;
+    if ix >= 0x40200000 {
+        p = &QR8;
+        q = &QS8;
+    } else if ix >= 0x40122E8B {
+        p = &QR5;
+        q = &QS5;
+    } else if ix >= 0x4006DB6D {
+        p = &QR3;
+        q = &QS3;
+    } else
+    /*ix >= 0x40000000*/
+    {
+        p = &QR2;
+        q = &QS2;
+    }
+    z = 1.0 / (x * x);
+    r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
+    s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * (q[4] + z * q[5])))));
+    return (-0.125 + r / s) / x;
+}
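+// Hedged sketch (not in the upstream source): the leading term of the
+// expansion is -1/(8x), so x * qzero(x) should sit close to -0.125 for
+// large x, with the R/S correction of order 1/x^2.
+#[allow(dead_code)]
+fn _qzero_leading_term_sketch() {
+    let x = 100.0;
+    assert!(fabs(x * qzero(x) + 0.125) < 1.0e-4);
+}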
+
+
\ No newline at end of file diff --git a/src/libm/math/j0f.rs.html b/src/libm/math/j0f.rs.html new file mode 100644 index 000000000..cc65cdeb8 --- /dev/null +++ b/src/libm/math/j0f.rs.html @@ -0,0 +1,721 @@ +j0f.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/e_j0f.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::{cosf, fabsf, logf, sinf, sqrtf};
+
+const INVSQRTPI: f32 = 5.6418961287e-01; /* 0x3f106ebb */
+const TPI: f32 = 6.3661974669e-01; /* 0x3f22f983 */
+
+fn common(ix: u32, x: f32, y0: bool) -> f32 {
+    let z: f32;
+    let s: f32;
+    let mut c: f32;
+    let mut ss: f32;
+    let mut cc: f32;
+    /*
+     * j0(x) = 1/sqrt(pi) * (P(0,x)*cc - Q(0,x)*ss) / sqrt(x)
+     * y0(x) = 1/sqrt(pi) * (P(0,x)*ss + Q(0,x)*cc) / sqrt(x)
+     */
+    s = sinf(x);
+    c = cosf(x);
+    if y0 {
+        c = -c;
+    }
+    cc = s + c;
+    if ix < 0x7f000000 {
+        ss = s - c;
+        z = -cosf(2.0 * x);
+        if s * c < 0.0 {
+            cc = z / ss;
+        } else {
+            ss = z / cc;
+        }
+        if ix < 0x58800000 {
+            if y0 {
+                ss = -ss;
+            }
+            cc = pzerof(x) * cc - qzerof(x) * ss;
+        }
+    }
+    return INVSQRTPI * cc / sqrtf(x);
+}
+
+/* R0/S0 on [0, 2.00] */
+const R02: f32 = 1.5625000000e-02; /* 0x3c800000 */
+const R03: f32 = -1.8997929874e-04; /* 0xb947352e */
+const R04: f32 = 1.8295404516e-06; /* 0x35f58e88 */
+const R05: f32 = -4.6183270541e-09; /* 0xb19eaf3c */
+const S01: f32 = 1.5619102865e-02; /* 0x3c7fe744 */
+const S02: f32 = 1.1692678527e-04; /* 0x38f53697 */
+const S03: f32 = 5.1354652442e-07; /* 0x3509daa6 */
+const S04: f32 = 1.1661400734e-09; /* 0x30a045e8 */
+
+pub fn j0f(mut x: f32) -> f32 {
+    let z: f32;
+    let r: f32;
+    let s: f32;
+    let mut ix: u32;
+
+    ix = x.to_bits();
+    ix &= 0x7fffffff;
+    if ix >= 0x7f800000 {
+        return 1.0 / (x * x);
+    }
+    x = fabsf(x);
+
+    if ix >= 0x40000000 {
+        /* |x| >= 2 */
+        /* large ulp error near zeros */
+        return common(ix, x, false);
+    }
+    if ix >= 0x3a000000 {
+        /* |x| >= 2**-11 */
+        /* up to 4ulp error near 2 */
+        z = x * x;
+        r = z * (R02 + z * (R03 + z * (R04 + z * R05)));
+        s = 1.0 + z * (S01 + z * (S02 + z * (S03 + z * S04)));
+        return (1.0 + x / 2.0) * (1.0 - x / 2.0) + z * (r / s);
+    }
+    if ix >= 0x21800000 {
+        /* |x| >= 2**-60 */
+        x = 0.25 * x * x;
+    }
+    return 1.0 - x;
+}
+
+const U00: f32 = -7.3804296553e-02; /* 0xbd9726b5 */
+const U01: f32 = 1.7666645348e-01; /* 0x3e34e80d */
+const U02: f32 = -1.3818567619e-02; /* 0xbc626746 */
+const U03: f32 = 3.4745343146e-04; /* 0x39b62a69 */
+const U04: f32 = -3.8140706238e-06; /* 0xb67ff53c */
+const U05: f32 = 1.9559013964e-08; /* 0x32a802ba */
+const U06: f32 = -3.9820518410e-11; /* 0xae2f21eb */
+const V01: f32 = 1.2730483897e-02; /* 0x3c509385 */
+const V02: f32 = 7.6006865129e-05; /* 0x389f65e0 */
+const V03: f32 = 2.5915085189e-07; /* 0x348b216c */
+const V04: f32 = 4.4111031494e-10; /* 0x2ff280c2 */
+
+pub fn y0f(x: f32) -> f32 {
+    let z: f32;
+    let u: f32;
+    let v: f32;
+    let ix: u32;
+
+    ix = x.to_bits();
+    if (ix & 0x7fffffff) == 0 {
+        return -1.0 / 0.0;
+    }
+    if (ix >> 31) != 0 {
+        return 0.0 / 0.0;
+    }
+    if ix >= 0x7f800000 {
+        return 1.0 / x;
+    }
+    if ix >= 0x40000000 {
+        /* |x| >= 2.0 */
+        /* large ulp error near zeros */
+        return common(ix, x, true);
+    }
+    if ix >= 0x39000000 {
+        /* x >= 2**-13 */
+        /* large ulp error at x ~= 0.89 */
+        z = x * x;
+        u = U00 + z * (U01 + z * (U02 + z * (U03 + z * (U04 + z * (U05 + z * U06)))));
+        v = 1.0 + z * (V01 + z * (V02 + z * (V03 + z * V04)));
+        return u / v + TPI * (j0f(x) * logf(x));
+    }
+    return U00 + TPI * logf(x);
+}
+
+/* The asymptotic expansion of pzero is
+ *      1 - 9/128 s^2 + 11025/98304 s^4 - ...,  where s = 1/x.
+ * For x >= 2, we approximate pzero by
+ *      pzero(x) = 1 + (R/S)
+ * where  R = pR0 + pR1*s^2 + pR2*s^4 + ... + pR5*s^10
+ *        S = 1 + pS0*s^2 + ... + pS4*s^10
+ * and
+ *      | pzero(x)-1-R/S | <= 2  ** ( -60.26)
+ */
+const PR8: [f32; 6] = [
+    /* for x in [inf, 8]=1/[0,0.125] */
+    0.0000000000e+00,  /* 0x00000000 */
+    -7.0312500000e-02, /* 0xbd900000 */
+    -8.0816707611e+00, /* 0xc1014e86 */
+    -2.5706311035e+02, /* 0xc3808814 */
+    -2.4852163086e+03, /* 0xc51b5376 */
+    -5.2530439453e+03, /* 0xc5a4285a */
+];
+const PS8: [f32; 5] = [
+    1.1653436279e+02, /* 0x42e91198 */
+    3.8337448730e+03, /* 0x456f9beb */
+    4.0597855469e+04, /* 0x471e95db */
+    1.1675296875e+05, /* 0x47e4087c */
+    4.7627726562e+04, /* 0x473a0bba */
+];
+const PR5: [f32; 6] = [
+    /* for x in [8,4.5454]=1/[0.125,0.22001] */
+    -1.1412546255e-11, /* 0xad48c58a */
+    -7.0312492549e-02, /* 0xbd8fffff */
+    -4.1596107483e+00, /* 0xc0851b88 */
+    -6.7674766541e+01, /* 0xc287597b */
+    -3.3123129272e+02, /* 0xc3a59d9b */
+    -3.4643338013e+02, /* 0xc3ad3779 */
+];
+const PS5: [f32; 5] = [
+    6.0753936768e+01, /* 0x42730408 */
+    1.0512523193e+03, /* 0x44836813 */
+    5.9789707031e+03, /* 0x45bad7c4 */
+    9.6254453125e+03, /* 0x461665c8 */
+    2.4060581055e+03, /* 0x451660ee */
+];
+
+const PR3: [f32; 6] = [
+    /* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
+    -2.5470459075e-09, /* 0xb12f081b */
+    -7.0311963558e-02, /* 0xbd8fffb8 */
+    -2.4090321064e+00, /* 0xc01a2d95 */
+    -2.1965976715e+01, /* 0xc1afba52 */
+    -5.8079170227e+01, /* 0xc2685112 */
+    -3.1447946548e+01, /* 0xc1fb9565 */
+];
+const PS3: [f32; 5] = [
+    3.5856033325e+01, /* 0x420f6c94 */
+    3.6151397705e+02, /* 0x43b4c1ca */
+    1.1936077881e+03, /* 0x44953373 */
+    1.1279968262e+03, /* 0x448cffe6 */
+    1.7358093262e+02, /* 0x432d94b8 */
+];
+
+const PR2: [f32; 6] = [
+    /* for x in [2.8570,2]=1/[0.3499,0.5] */
+    -8.8753431271e-08, /* 0xb3be98b7 */
+    -7.0303097367e-02, /* 0xbd8ffb12 */
+    -1.4507384300e+00, /* 0xbfb9b1cc */
+    -7.6356959343e+00, /* 0xc0f4579f */
+    -1.1193166733e+01, /* 0xc1331736 */
+    -3.2336456776e+00, /* 0xc04ef40d */
+];
+const PS2: [f32; 5] = [
+    2.2220300674e+01, /* 0x41b1c32d */
+    1.3620678711e+02, /* 0x430834f0 */
+    2.7047027588e+02, /* 0x43873c32 */
+    1.5387539673e+02, /* 0x4319e01a */
+    1.4657617569e+01, /* 0x416a859a */
+];
+
+fn pzerof(x: f32) -> f32 {
+    let p: &[f32; 6];
+    let q: &[f32; 5];
+    let z: f32;
+    let r: f32;
+    let s: f32;
+    let mut ix: u32;
+
+    ix = x.to_bits();
+    ix &= 0x7fffffff;
+    if ix >= 0x41000000 {
+        p = &PR8;
+        q = &PS8;
+    } else if ix >= 0x409173eb {
+        p = &PR5;
+        q = &PS5;
+    } else if ix >= 0x4036d917 {
+        p = &PR3;
+        q = &PS3;
+    } else
+    /*ix >= 0x40000000*/
+    {
+        p = &PR2;
+        q = &PS2;
+    }
+    z = 1.0 / (x * x);
+    r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
+    s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * q[4]))));
+    return 1.0 + r / s;
+}
+
+/* For x >= 8, the asymptotic expansion of qzero is
+ *      -1/8 s + 75/1024 s^3 - ..., where s = 1/x.
+ * We approximate qzero by
+ *      qzero(x) = s*(-0.125 + (R/S))
+ * where  R = qR0 + qR1*s^2 + qR2*s^4 + ... + qR5*s^10
+ *        S = 1 + qS0*s^2 + ... + qS5*s^12
+ * and
+ *      | qzero(x)/s + 0.125 - R/S | <= 2 ** (-61.22)
+ */
+const QR8: [f32; 6] = [
+    /* for x in [inf, 8]=1/[0,0.125] */
+    0.0000000000e+00, /* 0x00000000 */
+    7.3242187500e-02, /* 0x3d960000 */
+    1.1768206596e+01, /* 0x413c4a93 */
+    5.5767340088e+02, /* 0x440b6b19 */
+    8.8591972656e+03, /* 0x460a6cca */
+    3.7014625000e+04, /* 0x471096a0 */
+];
+const QS8: [f32; 6] = [
+    1.6377603149e+02,  /* 0x4323c6aa */
+    8.0983447266e+03,  /* 0x45fd12c2 */
+    1.4253829688e+05,  /* 0x480b3293 */
+    8.0330925000e+05,  /* 0x49441ed4 */
+    8.4050156250e+05,  /* 0x494d3359 */
+    -3.4389928125e+05, /* 0xc8a7eb69 */
+];
+
+const QR5: [f32; 6] = [
+    /* for x in [8,4.5454]=1/[0.125,0.22001] */
+    1.8408595828e-11, /* 0x2da1ec79 */
+    7.3242180049e-02, /* 0x3d95ffff */
+    5.8356351852e+00, /* 0x40babd86 */
+    1.3511157227e+02, /* 0x43071c90 */
+    1.0272437744e+03, /* 0x448067cd */
+    1.9899779053e+03, /* 0x44f8bf4b */
+];
+const QS5: [f32; 6] = [
+    8.2776611328e+01,  /* 0x42a58da0 */
+    2.0778142090e+03,  /* 0x4501dd07 */
+    1.8847289062e+04,  /* 0x46933e94 */
+    5.6751113281e+04,  /* 0x475daf1d */
+    3.5976753906e+04,  /* 0x470c88c1 */
+    -5.3543427734e+03, /* 0xc5a752be */
+];
+
+const QR3: [f32; 6] = [
+    /* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
+    4.3774099900e-09, /* 0x3196681b */
+    7.3241114616e-02, /* 0x3d95ff70 */
+    3.3442313671e+00, /* 0x405607e3 */
+    4.2621845245e+01, /* 0x422a7cc5 */
+    1.7080809021e+02, /* 0x432acedf */
+    1.6673394775e+02, /* 0x4326bbe4 */
+];
+const QS3: [f32; 6] = [
+    4.8758872986e+01,  /* 0x42430916 */
+    7.0968920898e+02,  /* 0x44316c1c */
+    3.7041481934e+03,  /* 0x4567825f */
+    6.4604252930e+03,  /* 0x45c9e367 */
+    2.5163337402e+03,  /* 0x451d4557 */
+    -1.4924745178e+02, /* 0xc3153f59 */
+];
+
+const QR2: [f32; 6] = [
+    /* for x in [2.8570,2]=1/[0.3499,0.5] */
+    1.5044444979e-07, /* 0x342189db */
+    7.3223426938e-02, /* 0x3d95f62a */
+    1.9981917143e+00, /* 0x3fffc4bf */
+    1.4495602608e+01, /* 0x4167edfd */
+    3.1666231155e+01, /* 0x41fd5471 */
+    1.6252708435e+01, /* 0x4182058c */
+];
+const QS2: [f32; 6] = [
+    3.0365585327e+01,  /* 0x41f2ecb8 */
+    2.6934811401e+02,  /* 0x4386ac8f */
+    8.4478375244e+02,  /* 0x44533229 */
+    8.8293585205e+02,  /* 0x445cbbe5 */
+    2.1266638184e+02,  /* 0x4354aa98 */
+    -5.3109550476e+00, /* 0xc0a9f358 */
+];
+
+fn qzerof(x: f32) -> f32 {
+    let p: &[f32; 6];
+    let q: &[f32; 6];
+    let s: f32;
+    let r: f32;
+    let z: f32;
+    let mut ix: u32;
+
+    ix = x.to_bits();
+    ix &= 0x7fffffff;
+    if ix >= 0x41000000 {
+        p = &QR8;
+        q = &QS8;
+    } else if ix >= 0x409173eb {
+        p = &QR5;
+        q = &QS5;
+    } else if ix >= 0x4036d917 {
+        p = &QR3;
+        q = &QS3;
+    } else
+    /*ix >= 0x40000000*/
+    {
+        p = &QR2;
+        q = &QS2;
+    }
+    z = 1.0 / (x * x);
+    r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
+    s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * (q[4] + z * q[5])))));
+    return (-0.125 + r / s) / x;
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/j1.rs.html b/src/libm/math/j1.rs.html new file mode 100644 index 000000000..74913d605 --- /dev/null +++ b/src/libm/math/j1.rs.html @@ -0,0 +1,831 @@ +j1.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/e_j1.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/* j1(x), y1(x)
+ * Bessel function of the first and second kinds of order one.
+ * Method -- j1(x):
+ *      1. For tiny x, we use j1(x) = x/2 - x^3/16 + x^5/384 - ...
+ *      2. Reduce x to |x| since j1(x)=-j1(-x),  and
+ *         for x in (0,2)
+ *              j1(x) = x/2 + x*z*R0/S0,  where z = x*x;
+ *         (precision:  |j1/x - 1/2 - R0/S0 |<2**-61.51 )
+ *         for x in (2,inf)
+ *              j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
+ *              y1(x) = sqrt(2/(pi*x))*(p1(x)*sin(x1)+q1(x)*cos(x1))
+ *         where x1 = x-3*pi/4. It is better to compute sin(x1),cos(x1)
+ *         as follows:
+ *              cos(x1) =  cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
+ *                      =  1/sqrt(2) * (sin(x) - cos(x))
+ *              sin(x1) =  sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
+ *                      = -1/sqrt(2) * (sin(x) + cos(x))
+ *         (To avoid cancellation, use
+ *              sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
+ *          to compute the worse one.)
+ *
+ *      3. Special cases
+ *              j1(nan)= nan
+ *              j1(0) = 0
+ *              j1(inf) = 0
+ *
+ * Method -- y1(x):
+ *      1. screen out x<=0 cases: y1(0)=-inf, y1(x<0)=NaN
+ *      2. For x<2.
+ *         Since
+ *              y1(x) = 2/pi*(j1(x)*(ln(x/2)+Euler)-1/x-x/2+5/64*x^3-...)
+ *         therefore y1(x)-2/pi*j1(x)*ln(x)-1/x is an odd function.
+ *         We use the following function to approximate y1,
+ *              y1(x) = x*U(z)/V(z) + (2/pi)*(j1(x)*ln(x)-1/x), z= x^2
+ *         where for x in [0,2] (abs err less than 2**-65.89)
+ *              U(z) = U0[0] + U0[1]*z + ... + U0[4]*z^4
+ *              V(z) = 1  + v0[0]*z + ... + v0[4]*z^5
+ *         Note: For tiny x, 1/x dominates y1 and hence
+ *              y1(tiny) = -2/pi/tiny, (choose tiny<2**-54)
+ *      3. For x>=2.
+ *              y1(x) = sqrt(2/(pi*x))*(p1(x)*sin(x1)+q1(x)*cos(x1))
+ *         where x1 = x-3*pi/4. It is better to compute sin(x1),cos(x1)
+ *         by the method mentioned above.
+ */
+
+use super::{cos, fabs, get_high_word, get_low_word, log, sin, sqrt};
+
+const INVSQRTPI: f64 = 5.64189583547756279280e-01; /* 0x3FE20DD7, 0x50429B6D */
+const TPI: f64 = 6.36619772367581382433e-01; /* 0x3FE45F30, 0x6DC9C883 */
+
+fn common(ix: u32, x: f64, y1: bool, sign: bool) -> f64 {
+    let z: f64;
+    let mut s: f64;
+    let c: f64;
+    let mut ss: f64;
+    let mut cc: f64;
+
+    /*
+     * j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x-3pi/4)-q1(x)*sin(x-3pi/4))
+     * y1(x) = sqrt(2/(pi*x))*(p1(x)*sin(x-3pi/4)+q1(x)*cos(x-3pi/4))
+     *
+     * sin(x-3pi/4) = -(sin(x) + cos(x))/sqrt(2)
+     * cos(x-3pi/4) = (sin(x) - cos(x))/sqrt(2)
+     * sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
+     */
+    s = sin(x);
+    if y1 {
+        s = -s;
+    }
+    c = cos(x);
+    cc = s - c;
+    if ix < 0x7fe00000 {
+        /* avoid overflow in 2*x */
+        ss = -s - c;
+        z = cos(2.0 * x);
+        if s * c > 0.0 {
+            cc = z / ss;
+        } else {
+            ss = z / cc;
+        }
+        if ix < 0x48000000 {
+            if y1 {
+                ss = -ss;
+            }
+            cc = pone(x) * cc - qone(x) * ss;
+        }
+    }
+    if sign {
+        cc = -cc;
+    }
+    return INVSQRTPI * cc / sqrt(x);
+}
+
+/* R0/S0 on [0,2] */
+const R00: f64 = -6.25000000000000000000e-02; /* 0xBFB00000, 0x00000000 */
+const R01: f64 = 1.40705666955189706048e-03; /* 0x3F570D9F, 0x98472C61 */
+const R02: f64 = -1.59955631084035597520e-05; /* 0xBEF0C5C6, 0xBA169668 */
+const R03: f64 = 4.96727999609584448412e-08; /* 0x3E6AAAFA, 0x46CA0BD9 */
+const S01: f64 = 1.91537599538363460805e-02; /* 0x3F939D0B, 0x12637E53 */
+const S02: f64 = 1.85946785588630915560e-04; /* 0x3F285F56, 0xB9CDF664 */
+const S03: f64 = 1.17718464042623683263e-06; /* 0x3EB3BFF8, 0x333F8498 */
+const S04: f64 = 5.04636257076217042715e-09; /* 0x3E35AC88, 0xC97DFF2C */
+const S05: f64 = 1.23542274426137913908e-11; /* 0x3DAB2ACF, 0xCFB97ED8 */
+
+pub fn j1(x: f64) -> f64 {
+    let mut z: f64;
+    let r: f64;
+    let s: f64;
+    let mut ix: u32;
+    let sign: bool;
+
+    ix = get_high_word(x);
+    sign = (ix >> 31) != 0;
+    ix &= 0x7fffffff;
+    if ix >= 0x7ff00000 {
+        return 1.0 / (x * x);
+    }
+    if ix >= 0x40000000 {
+        /* |x| >= 2 */
+        return common(ix, fabs(x), false, sign);
+    }
+    if ix >= 0x38000000 {
+        /* |x| >= 2**-127 */
+        z = x * x;
+        r = z * (R00 + z * (R01 + z * (R02 + z * R03)));
+        s = 1.0 + z * (S01 + z * (S02 + z * (S03 + z * (S04 + z * S05))));
+        z = r / s;
+    } else {
+        /* avoid underflow, raise inexact if x!=0 */
+        z = x;
+    }
+    return (0.5 + z) * x;
+}
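+// Hedged sketch (not in the upstream source): for small |x| the series
+// j1(x) = x/2 - x^3/16 + x^5/384 - ... reduces to x/2, with truncation
+// error about x^3/16.
+#[allow(dead_code)]
+fn _j1_small_x_sketch() {
+    let x = 1.0e-3;
+    assert!(fabs(j1(x) - x / 2.0) < 1.0e-9);
+}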
+
+const U0: [f64; 5] = [
+    -1.96057090646238940668e-01, /* 0xBFC91866, 0x143CBC8A */
+    5.04438716639811282616e-02,  /* 0x3FA9D3C7, 0x76292CD1 */
+    -1.91256895875763547298e-03, /* 0xBF5F55E5, 0x4844F50F */
+    2.35252600561610495928e-05,  /* 0x3EF8AB03, 0x8FA6B88E */
+    -9.19099158039878874504e-08, /* 0xBE78AC00, 0x569105B8 */
+];
+const V0: [f64; 5] = [
+    1.99167318236649903973e-02, /* 0x3F94650D, 0x3F4DA9F0 */
+    2.02552581025135171496e-04, /* 0x3F2A8C89, 0x6C257764 */
+    1.35608801097516229404e-06, /* 0x3EB6C05A, 0x894E8CA6 */
+    6.22741452364621501295e-09, /* 0x3E3ABF1D, 0x5BA69A86 */
+    1.66559246207992079114e-11, /* 0x3DB25039, 0xDACA772A */
+];
+
+pub fn y1(x: f64) -> f64 {
+    let z: f64;
+    let u: f64;
+    let v: f64;
+    let ix: u32;
+    let lx: u32;
+
+    ix = get_high_word(x);
+    lx = get_low_word(x);
+
+    /* y1(nan)=nan, y1(<0)=nan, y1(0)=-inf, y1(inf)=0 */
+    if (ix << 1 | lx) == 0 {
+        return -1.0 / 0.0;
+    }
+    if (ix >> 31) != 0 {
+        return 0.0 / 0.0;
+    }
+    if ix >= 0x7ff00000 {
+        return 1.0 / x;
+    }
+
+    if ix >= 0x40000000 {
+        /* x >= 2 */
+        return common(ix, x, true, false);
+    }
+    if ix < 0x3c900000 {
+        /* x < 2**-54 */
+        return -TPI / x;
+    }
+    z = x * x;
+    u = U0[0] + z * (U0[1] + z * (U0[2] + z * (U0[3] + z * U0[4])));
+    v = 1.0 + z * (V0[0] + z * (V0[1] + z * (V0[2] + z * (V0[3] + z * V0[4]))));
+    return x * (u / v) + TPI * (j1(x) * log(x) - 1.0 / x);
+}
+
+/* For x >= 8, the asymptotic expansion of pone is
+ *      1 + 15/128 s^2 - 4725/2^15 s^4 - ...,   where s = 1/x.
+ * We approximate pone by
+ *      pone(x) = 1 + (R/S)
+ * where  R = pr0 + pr1*s^2 + pr2*s^4 + ... + pr5*s^10
+ *        S = 1 + ps0*s^2 + ... + ps4*s^10
+ * and
+ *      | pone(x)-1-R/S | <= 2  ** ( -60.06)
+ */
+
+const PR8: [f64; 6] = [
+    /* for x in [inf, 8]=1/[0,0.125] */
+    0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
+    1.17187499999988647970e-01, /* 0x3FBDFFFF, 0xFFFFFCCE */
+    1.32394806593073575129e+01, /* 0x402A7A9D, 0x357F7FCE */
+    4.12051854307378562225e+02, /* 0x4079C0D4, 0x652EA590 */
+    3.87474538913960532227e+03, /* 0x40AE457D, 0xA3A532CC */
+    7.91447954031891731574e+03, /* 0x40BEEA7A, 0xC32782DD */
+];
+const PS8: [f64; 5] = [
+    1.14207370375678408436e+02, /* 0x405C8D45, 0x8E656CAC */
+    3.65093083420853463394e+03, /* 0x40AC85DC, 0x964D274F */
+    3.69562060269033463555e+04, /* 0x40E20B86, 0x97C5BB7F */
+    9.76027935934950801311e+04, /* 0x40F7D42C, 0xB28F17BB */
+    3.08042720627888811578e+04, /* 0x40DE1511, 0x697A0B2D */
+];
+
+const PR5: [f64; 6] = [
+    /* for x in [8,4.5454]=1/[0.125,0.22001] */
+    1.31990519556243522749e-11, /* 0x3DAD0667, 0xDAE1CA7D */
+    1.17187493190614097638e-01, /* 0x3FBDFFFF, 0xE2C10043 */
+    6.80275127868432871736e+00, /* 0x401B3604, 0x6E6315E3 */
+    1.08308182990189109773e+02, /* 0x405B13B9, 0x452602ED */
+    5.17636139533199752805e+02, /* 0x40802D16, 0xD052D649 */
+    5.28715201363337541807e+02, /* 0x408085B8, 0xBB7E0CB7 */
+];
+const PS5: [f64; 5] = [
+    5.92805987221131331921e+01, /* 0x404DA3EA, 0xA8AF633D */
+    9.91401418733614377743e+02, /* 0x408EFB36, 0x1B066701 */
+    5.35326695291487976647e+03, /* 0x40B4E944, 0x5706B6FB */
+    7.84469031749551231769e+03, /* 0x40BEA4B0, 0xB8A5BB15 */
+    1.50404688810361062679e+03, /* 0x40978030, 0x036F5E51 */
+];
+
+const PR3: [f64; 6] = [
+    3.02503916137373618024e-09, /* 0x3E29FC21, 0xA7AD9EDD */
+    1.17186865567253592491e-01, /* 0x3FBDFFF5, 0x5B21D17B */
+    3.93297750033315640650e+00, /* 0x400F76BC, 0xE85EAD8A */
+    3.51194035591636932736e+01, /* 0x40418F48, 0x9DA6D129 */
+    9.10550110750781271918e+01, /* 0x4056C385, 0x4D2C1837 */
+    4.85590685197364919645e+01, /* 0x4048478F, 0x8EA83EE5 */
+];
+const PS3: [f64; 5] = [
+    3.47913095001251519989e+01, /* 0x40416549, 0xA134069C */
+    3.36762458747825746741e+02, /* 0x40750C33, 0x07F1A75F */
+    1.04687139975775130551e+03, /* 0x40905B7C, 0x5037D523 */
+    8.90811346398256432622e+02, /* 0x408BD67D, 0xA32E31E9 */
+    1.03787932439639277504e+02, /* 0x4059F26D, 0x7C2EED53 */
+];
+
+const PR2: [f64; 6] = [
+    /* for x in [2.8570,2]=1/[0.3499,0.5] */
+    1.07710830106873743082e-07, /* 0x3E7CE9D4, 0xF65544F4 */
+    1.17176219462683348094e-01, /* 0x3FBDFF42, 0xBE760D83 */
+    2.36851496667608785174e+00, /* 0x4002F2B7, 0xF98FAEC0 */
+    1.22426109148261232917e+01, /* 0x40287C37, 0x7F71A964 */
+    1.76939711271687727390e+01, /* 0x4031B1A8, 0x177F8EE2 */
+    5.07352312588818499250e+00, /* 0x40144B49, 0xA574C1FE */
+];
+const PS2: [f64; 5] = [
+    2.14364859363821409488e+01, /* 0x40356FBD, 0x8AD5ECDC */
+    1.25290227168402751090e+02, /* 0x405F5293, 0x14F92CD5 */
+    2.32276469057162813669e+02, /* 0x406D08D8, 0xD5A2DBD9 */
+    1.17679373287147100768e+02, /* 0x405D6B7A, 0xDA1884A9 */
+    8.36463893371618283368e+00, /* 0x4020BAB1, 0xF44E5192 */
+];
+
+fn pone(x: f64) -> f64 {
+    let p: &[f64; 6];
+    let q: &[f64; 5];
+    let z: f64;
+    let r: f64;
+    let s: f64;
+    let mut ix: u32;
+
+    ix = get_high_word(x);
+    ix &= 0x7fffffff;
+    if ix >= 0x40200000 {
+        p = &PR8;
+        q = &PS8;
+    } else if ix >= 0x40122E8B {
+        p = &PR5;
+        q = &PS5;
+    } else if ix >= 0x4006DB6D {
+        p = &PR3;
+        q = &PS3;
+    } else
+    /*ix >= 0x40000000*/
+    {
+        p = &PR2;
+        q = &PS2;
+    }
+    z = 1.0 / (x * x);
+    r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
+    s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * q[4]))));
+    return 1.0 + r / s;
+}
+
+/* For x >= 8, the asymptotic expansion of qone is
+ *      3/8 s - 105/1024 s^3 - ..., where s = 1/x.
+ * We approximate qone by
+ *      qone(x) = s*(0.375 + (R/S))
+ * where  R = qr1*s^2 + qr2*s^4 + ... + qr5*s^10
+ *        S = 1 + qs1*s^2 + ... + qs6*s^12
+ * and
+ *      | qone(x)/s -0.375-R/S | <= 2  ** ( -61.13)
+ */
+
+const QR8: [f64; 6] = [
+    /* for x in [inf, 8]=1/[0,0.125] */
+    0.00000000000000000000e+00,  /* 0x00000000, 0x00000000 */
+    -1.02539062499992714161e-01, /* 0xBFBA3FFF, 0xFFFFFDF3 */
+    -1.62717534544589987888e+01, /* 0xC0304591, 0xA26779F7 */
+    -7.59601722513950107896e+02, /* 0xC087BCD0, 0x53E4B576 */
+    -1.18498066702429587167e+04, /* 0xC0C724E7, 0x40F87415 */
+    -4.84385124285750353010e+04, /* 0xC0E7A6D0, 0x65D09C6A */
+];
+const QS8: [f64; 6] = [
+    1.61395369700722909556e+02,  /* 0x40642CA6, 0xDE5BCDE5 */
+    7.82538599923348465381e+03,  /* 0x40BE9162, 0xD0D88419 */
+    1.33875336287249578163e+05,  /* 0x4100579A, 0xB0B75E98 */
+    7.19657723683240939863e+05,  /* 0x4125F653, 0x72869C19 */
+    6.66601232617776375264e+05,  /* 0x412457D2, 0x7719AD5C */
+    -2.94490264303834643215e+05, /* 0xC111F969, 0x0EA5AA18 */
+];
+
+const QR5: [f64; 6] = [
+    /* for x in [8,4.5454]=1/[0.125,0.22001] */
+    -2.08979931141764104297e-11, /* 0xBDB6FA43, 0x1AA1A098 */
+    -1.02539050241375426231e-01, /* 0xBFBA3FFF, 0xCB597FEF */
+    -8.05644828123936029840e+00, /* 0xC0201CE6, 0xCA03AD4B */
+    -1.83669607474888380239e+02, /* 0xC066F56D, 0x6CA7B9B0 */
+    -1.37319376065508163265e+03, /* 0xC09574C6, 0x6931734F */
+    -2.61244440453215656817e+03, /* 0xC0A468E3, 0x88FDA79D */
+];
+const QS5: [f64; 6] = [
+    8.12765501384335777857e+01,  /* 0x405451B2, 0xFF5A11B2 */
+    1.99179873460485964642e+03,  /* 0x409F1F31, 0xE77BF839 */
+    1.74684851924908907677e+04,  /* 0x40D10F1F, 0x0D64CE29 */
+    4.98514270910352279316e+04,  /* 0x40E8576D, 0xAABAD197 */
+    2.79480751638918118260e+04,  /* 0x40DB4B04, 0xCF7C364B */
+    -4.71918354795128470869e+03, /* 0xC0B26F2E, 0xFCFFA004 */
+];
+
+const QR3: [f64; 6] = [
+    -5.07831226461766561369e-09, /* 0xBE35CFA9, 0xD38FC84F */
+    -1.02537829820837089745e-01, /* 0xBFBA3FEB, 0x51AEED54 */
+    -4.61011581139473403113e+00, /* 0xC01270C2, 0x3302D9FF */
+    -5.78472216562783643212e+01, /* 0xC04CEC71, 0xC25D16DA */
+    -2.28244540737631695038e+02, /* 0xC06C87D3, 0x4718D55F */
+    -2.19210128478909325622e+02, /* 0xC06B66B9, 0x5F5C1BF6 */
+];
+const QS3: [f64; 6] = [
+    4.76651550323729509273e+01,  /* 0x4047D523, 0xCCD367E4 */
+    6.73865112676699709482e+02,  /* 0x40850EEB, 0xC031EE3E */
+    3.38015286679526343505e+03,  /* 0x40AA684E, 0x448E7C9A */
+    5.54772909720722782367e+03,  /* 0x40B5ABBA, 0xA61D54A6 */
+    1.90311919338810798763e+03,  /* 0x409DBC7A, 0x0DD4DF4B */
+    -1.35201191444307340817e+02, /* 0xC060E670, 0x290A311F */
+];
+
+const QR2: [f64; 6] = [
+    /* for x in [2.8570,2]=1/[0.3499,0.5] */
+    -1.78381727510958865572e-07, /* 0xBE87F126, 0x44C626D2 */
+    -1.02517042607985553460e-01, /* 0xBFBA3E8E, 0x9148B010 */
+    -2.75220568278187460720e+00, /* 0xC0060484, 0x69BB4EDA */
+    -1.96636162643703720221e+01, /* 0xC033A9E2, 0xC168907F */
+    -4.23253133372830490089e+01, /* 0xC04529A3, 0xDE104AAA */
+    -2.13719211703704061733e+01, /* 0xC0355F36, 0x39CF6E52 */
+];
+const QS2: [f64; 6] = [
+    2.95333629060523854548e+01,  /* 0x403D888A, 0x78AE64FF */
+    2.52981549982190529136e+02,  /* 0x406F9F68, 0xDB821CBA */
+    7.57502834868645436472e+02,  /* 0x4087AC05, 0xCE49A0F7 */
+    7.39393205320467245656e+02,  /* 0x40871B25, 0x48D4C029 */
+    1.55949003336666123687e+02,  /* 0x40637E5E, 0x3C3ED8D4 */
+    -4.95949898822628210127e+00, /* 0xC013D686, 0xE71BE86B */
+];
+
+fn qone(x: f64) -> f64 {
+    let p: &[f64; 6];
+    let q: &[f64; 6];
+    let s: f64;
+    let r: f64;
+    let z: f64;
+    let mut ix: u32;
+
+    ix = get_high_word(x);
+    ix &= 0x7fffffff;
+    if ix >= 0x40200000 {
+        p = &QR8;
+        q = &QS8;
+    } else if ix >= 0x40122E8B {
+        p = &QR5;
+        q = &QS5;
+    } else if ix >= 0x4006DB6D {
+        p = &QR3;
+        q = &QS3;
+    } else
+    /*ix >= 0x40000000*/
+    {
+        p = &QR2;
+        q = &QS2;
+    }
+    z = 1.0 / (x * x);
+    r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
+    s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * (q[4] + z * q[5])))));
+    return (0.375 + r / s) / x;
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/j1f.rs.html b/src/libm/math/j1f.rs.html new file mode 100644 index 000000000..fabb11286 --- /dev/null +++ b/src/libm/math/j1f.rs.html @@ -0,0 +1,719 @@ +j1f.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/e_j1f.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::{cosf, fabsf, logf, sinf, sqrtf};
+
+const INVSQRTPI: f32 = 5.6418961287e-01; /* 0x3f106ebb */
+const TPI: f32 = 6.3661974669e-01; /* 0x3f22f983 */
+
+fn common(ix: u32, x: f32, y1: bool, sign: bool) -> f32 {
+    let z: f64;
+    let mut s: f64;
+    let c: f64;
+    let mut ss: f64;
+    let mut cc: f64;
+
+    s = sinf(x) as f64;
+    if y1 {
+        s = -s;
+    }
+    c = cosf(x) as f64;
+    cc = s - c;
+    if ix < 0x7f000000 {
+        ss = -s - c;
+        z = cosf(2.0 * x) as f64;
+        if s * c > 0.0 {
+            cc = z / ss;
+        } else {
+            ss = z / cc;
+        }
+        if ix < 0x58800000 {
+            if y1 {
+                ss = -ss;
+            }
+            cc = (ponef(x) as f64) * cc - (qonef(x) as f64) * ss;
+        }
+    }
+    if sign {
+        cc = -cc;
+    }
+    return INVSQRTPI * (cc as f32) / sqrtf(x);
+}
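+// Hedged note (not in the upstream source): unlike j0f's common(), this
+// helper widens every intermediate to f64, presumably because the
+// -cos(2x)/(s -+ c) substitution would shed too many f32 bits near the
+// Bessel zeros. A minimal sanity check of the widened path:
+#[allow(dead_code)]
+fn _j1f_common_sketch() {
+    assert!(j1f(10.0f32).is_finite());
+}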
+
+/* R0/S0 on [0,2] */
+const R00: f32 = -6.2500000000e-02; /* 0xbd800000 */
+const R01: f32 = 1.4070566976e-03; /* 0x3ab86cfd */
+const R02: f32 = -1.5995563444e-05; /* 0xb7862e36 */
+const R03: f32 = 4.9672799207e-08; /* 0x335557d2 */
+const S01: f32 = 1.9153760746e-02; /* 0x3c9ce859 */
+const S02: f32 = 1.8594678841e-04; /* 0x3942fab6 */
+const S03: f32 = 1.1771846857e-06; /* 0x359dffc2 */
+const S04: f32 = 5.0463624390e-09; /* 0x31ad6446 */
+const S05: f32 = 1.2354227016e-11; /* 0x2d59567e */
+
+pub fn j1f(x: f32) -> f32 {
+    let mut z: f32;
+    let r: f32;
+    let s: f32;
+    let mut ix: u32;
+    let sign: bool;
+
+    ix = x.to_bits();
+    sign = (ix >> 31) != 0;
+    ix &= 0x7fffffff;
+    if ix >= 0x7f800000 {
+        return 1.0 / (x * x);
+    }
+    if ix >= 0x40000000 {
+        /* |x| >= 2 */
+        return common(ix, fabsf(x), false, sign);
+    }
+    if ix >= 0x39000000 {
+        /* |x| >= 2**-13 */
+        z = x * x;
+        r = z * (R00 + z * (R01 + z * (R02 + z * R03)));
+        s = 1.0 + z * (S01 + z * (S02 + z * (S03 + z * (S04 + z * S05))));
+        z = 0.5 + r / s;
+    } else {
+        z = 0.5;
+    }
+    return z * x;
+}
+
+const U0: [f32; 5] = [
+    -1.9605709612e-01, /* 0xbe48c331 */
+    5.0443872809e-02,  /* 0x3d4e9e3c */
+    -1.9125689287e-03, /* 0xbafaaf2a */
+    2.3525259166e-05,  /* 0x37c5581c */
+    -9.1909917899e-08, /* 0xb3c56003 */
+];
+const V0: [f32; 5] = [
+    1.9916731864e-02, /* 0x3ca3286a */
+    2.0255257550e-04, /* 0x3954644b */
+    1.3560879779e-06, /* 0x35b602d4 */
+    6.2274145840e-09, /* 0x31d5f8eb */
+    1.6655924903e-11, /* 0x2d9281cf */
+];
+
+pub fn y1f(x: f32) -> f32 {
+    let z: f32;
+    let u: f32;
+    let v: f32;
+    let ix: u32;
+
+    ix = x.to_bits();
+    if (ix & 0x7fffffff) == 0 {
+        return -1.0 / 0.0;
+    }
+    if (ix >> 31) != 0 {
+        return 0.0 / 0.0;
+    }
+    if ix >= 0x7f800000 {
+        return 1.0 / x;
+    }
+    if ix >= 0x40000000 {
+        /* |x| >= 2.0 */
+        return common(ix, x, true, false);
+    }
+    if ix < 0x33000000 {
+        /* x < 2**-25 */
+        return -TPI / x;
+    }
+    z = x * x;
+    u = U0[0] + z * (U0[1] + z * (U0[2] + z * (U0[3] + z * U0[4])));
+    v = 1.0 + z * (V0[0] + z * (V0[1] + z * (V0[2] + z * (V0[3] + z * V0[4]))));
+    return x * (u / v) + TPI * (j1f(x) * logf(x) - 1.0 / x);
+}
+
+/* For x >= 8, the asymptotic expansion of pone is
+ *      1 + 15/128 s^2 - 4725/2^15 s^4 - ...,   where s = 1/x.
+ * We approximate pone by
+ *      pone(x) = 1 + (R/S)
+ * where  R = pr0 + pr1*s^2 + pr2*s^4 + ... + pr5*s^10
+ *        S = 1 + ps0*s^2 + ... + ps4*s^10
+ * and
+ *      | pone(x)-1-R/S | <= 2  ** ( -60.06)
+ */
+
+const PR8: [f32; 6] = [
+    /* for x in [inf, 8]=1/[0,0.125] */
+    0.0000000000e+00, /* 0x00000000 */
+    1.1718750000e-01, /* 0x3df00000 */
+    1.3239480972e+01, /* 0x4153d4ea */
+    4.1205184937e+02, /* 0x43ce06a3 */
+    3.8747453613e+03, /* 0x45722bed */
+    7.9144794922e+03, /* 0x45f753d6 */
+];
+const PS8: [f32; 5] = [
+    1.1420736694e+02, /* 0x42e46a2c */
+    3.6509309082e+03, /* 0x45642ee5 */
+    3.6956207031e+04, /* 0x47105c35 */
+    9.7602796875e+04, /* 0x47bea166 */
+    3.0804271484e+04, /* 0x46f0a88b */
+];
+
+const PR5: [f32; 6] = [
+    /* for x in [8,4.5454]=1/[0.125,0.22001] */
+    1.3199052094e-11, /* 0x2d68333f */
+    1.1718749255e-01, /* 0x3defffff */
+    6.8027510643e+00, /* 0x40d9b023 */
+    1.0830818176e+02, /* 0x42d89dca */
+    5.1763616943e+02, /* 0x440168b7 */
+    5.2871520996e+02, /* 0x44042dc6 */
+];
+const PS5: [f32; 5] = [
+    5.9280597687e+01, /* 0x426d1f55 */
+    9.9140142822e+02, /* 0x4477d9b1 */
+    5.3532670898e+03, /* 0x45a74a23 */
+    7.8446904297e+03, /* 0x45f52586 */
+    1.5040468750e+03, /* 0x44bc0180 */
+];
+
+const PR3: [f32; 6] = [
+    3.0250391081e-09, /* 0x314fe10d */
+    1.1718686670e-01, /* 0x3defffab */
+    3.9329774380e+00, /* 0x407bb5e7 */
+    3.5119403839e+01, /* 0x420c7a45 */
+    9.1055007935e+01, /* 0x42b61c2a */
+    4.8559066772e+01, /* 0x42423c7c */
+];
+const PS3: [f32; 5] = [
+    3.4791309357e+01, /* 0x420b2a4d */
+    3.3676245117e+02, /* 0x43a86198 */
+    1.0468714600e+03, /* 0x4482dbe3 */
+    8.9081134033e+02, /* 0x445eb3ed */
+    1.0378793335e+02, /* 0x42cf936c */
+];
+
+const PR2: [f32; 6] = [
+    /* for x in [2.8570,2]=1/[0.3499,0.5] */
+    1.0771083225e-07, /* 0x33e74ea8 */
+    1.1717621982e-01, /* 0x3deffa16 */
+    2.3685150146e+00, /* 0x401795c0 */
+    1.2242610931e+01, /* 0x4143e1bc */
+    1.7693971634e+01, /* 0x418d8d41 */
+    5.0735230446e+00, /* 0x40a25a4d */
+];
+const PS2: [f32; 5] = [
+    2.1436485291e+01, /* 0x41ab7dec */
+    1.2529022980e+02, /* 0x42fa9499 */
+    2.3227647400e+02, /* 0x436846c7 */
+    1.1767937469e+02, /* 0x42eb5bd7 */
+    8.3646392822e+00, /* 0x4105d590 */
+];
+
+fn ponef(x: f32) -> f32 {
+    let p: &[f32; 6];
+    let q: &[f32; 5];
+    let z: f32;
+    let r: f32;
+    let s: f32;
+    let mut ix: u32;
+
+    ix = x.to_bits();
+    ix &= 0x7fffffff;
+    if ix >= 0x41000000 {
+        p = &PR8;
+        q = &PS8;
+    } else if ix >= 0x409173eb {
+        p = &PR5;
+        q = &PS5;
+    } else if ix >= 0x4036d917 {
+        p = &PR3;
+        q = &PS3;
+    } else
+    /*ix >= 0x40000000*/
+    {
+        p = &PR2;
+        q = &PS2;
+    }
+    z = 1.0 / (x * x);
+    r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
+    s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * q[4]))));
+    return 1.0 + r / s;
+}
+
+/* For x >= 8, the asymptotic expansion of qone is
+ *      3/8 s - 105/1024 s^3 - ..., where s = 1/x.
+ * We approximate qone by
+ *      qone(x) = s*(0.375 + (R/S))
+ * where  R = qr1*s^2 + qr2*s^4 + ... + qr5*s^10
+ *        S = 1 + qs1*s^2 + ... + qs6*s^12
+ * and
+ *      | qone(x)/s -0.375-R/S | <= 2  ** ( -61.13)
+ */
+
+const QR8: [f32; 6] = [
+    /* for x in [inf, 8]=1/[0,0.125] */
+    0.0000000000e+00,  /* 0x00000000 */
+    -1.0253906250e-01, /* 0xbdd20000 */
+    -1.6271753311e+01, /* 0xc1822c8d */
+    -7.5960174561e+02, /* 0xc43de683 */
+    -1.1849806641e+04, /* 0xc639273a */
+    -4.8438511719e+04, /* 0xc73d3683 */
+];
+const QS8: [f32; 6] = [
+    1.6139537048e+02,  /* 0x43216537 */
+    7.8253862305e+03,  /* 0x45f48b17 */
+    1.3387534375e+05,  /* 0x4802bcd6 */
+    7.1965775000e+05,  /* 0x492fb29c */
+    6.6660125000e+05,  /* 0x4922be94 */
+    -2.9449025000e+05, /* 0xc88fcb48 */
+];
+
+const QR5: [f32; 6] = [
+    /* for x in [8,4.5454]=1/[0.125,0.22001] */
+    -2.0897993405e-11, /* 0xadb7d219 */
+    -1.0253904760e-01, /* 0xbdd1fffe */
+    -8.0564479828e+00, /* 0xc100e736 */
+    -1.8366960144e+02, /* 0xc337ab6b */
+    -1.3731937256e+03, /* 0xc4aba633 */
+    -2.6124443359e+03, /* 0xc523471c */
+];
+const QS5: [f32; 6] = [
+    8.1276550293e+01,  /* 0x42a28d98 */
+    1.9917987061e+03,  /* 0x44f8f98f */
+    1.7468484375e+04,  /* 0x468878f8 */
+    4.9851425781e+04,  /* 0x4742bb6d */
+    2.7948074219e+04,  /* 0x46da5826 */
+    -4.7191835938e+03, /* 0xc5937978 */
+];
+
+const QR3: [f32; 6] = [
+    -5.0783124372e-09, /* 0xb1ae7d4f */
+    -1.0253783315e-01, /* 0xbdd1ff5b */
+    -4.6101160049e+00, /* 0xc0938612 */
+    -5.7847221375e+01, /* 0xc267638e */
+    -2.2824453735e+02, /* 0xc3643e9a */
+    -2.1921012878e+02, /* 0xc35b35cb */
+];
+const QS3: [f32; 6] = [
+    4.7665153503e+01,  /* 0x423ea91e */
+    6.7386511230e+02,  /* 0x4428775e */
+    3.3801528320e+03,  /* 0x45534272 */
+    5.5477290039e+03,  /* 0x45ad5dd5 */
+    1.9031191406e+03,  /* 0x44ede3d0 */
+    -1.3520118713e+02, /* 0xc3073381 */
+];
+
+const QR2: [f32; 6] = [
+    /* for x in [2.8570,2]=1/[0.3499,0.5] */
+    -1.7838172539e-07, /* 0xb43f8932 */
+    -1.0251704603e-01, /* 0xbdd1f475 */
+    -2.7522056103e+00, /* 0xc0302423 */
+    -1.9663616180e+01, /* 0xc19d4f16 */
+    -4.2325313568e+01, /* 0xc2294d1f */
+    -2.1371921539e+01, /* 0xc1aaf9b2 */
+];
+const QS2: [f32; 6] = [
+    2.9533363342e+01,  /* 0x41ec4454 */
+    2.5298155212e+02,  /* 0x437cfb47 */
+    7.5750280762e+02,  /* 0x443d602e */
+    7.3939318848e+02,  /* 0x4438d92a */
+    1.5594900513e+02,  /* 0x431bf2f2 */
+    -4.9594988823e+00, /* 0xc09eb437 */
+];
+
+fn qonef(x: f32) -> f32 {
+    let p: &[f32; 6];
+    let q: &[f32; 6];
+    let s: f32;
+    let r: f32;
+    let z: f32;
+    let mut ix: u32;
+
+    ix = x.to_bits();
+    ix &= 0x7fffffff;
+    if ix >= 0x41000000 {
+        p = &QR8;
+        q = &QS8;
+    } else if ix >= 0x409173eb {
+        p = &QR5;
+        q = &QS5;
+    } else if ix >= 0x4036d917 {
+        p = &QR3;
+        q = &QS3;
+    } else
+    /*ix >= 0x40000000*/
+    {
+        p = &QR2;
+        q = &QS2;
+    }
+    z = 1.0 / (x * x);
+    r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
+    s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * (q[4] + z * q[5])))));
+    return (0.375 + r / s) / x;
+}
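+
+// ---- Editor's sketch (illustrative, not part of the original source) ----
+// For large x the 0.375 leading term of the expansion dominates, so
+// qonef(x)*x should sit close to 0.375; a quick sanity check under that
+// assumption:
+#[cfg(test)]
+#[test]
+fn qonef_leading_term() {
+    let x = 100.0f32;
+    assert!((qonef(x) * x - 0.375).abs() < 1e-3);
+}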
+
+
\ No newline at end of file diff --git a/src/libm/math/jn.rs.html b/src/libm/math/jn.rs.html new file mode 100644 index 000000000..a769cd928 --- /dev/null +++ b/src/libm/math/jn.rs.html @@ -0,0 +1,689 @@ +jn.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/e_jn.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/*
+ * jn(n, x), yn(n, x)
+ * floating point Bessel functions of the 1st and 2nd kind
+ * of order n
+ *
+ * Special cases:
+ *      y0(0)=y1(0)=yn(n,0) = -inf with division by zero signal;
+ *      y0(-ve)=y1(-ve)=yn(n,-ve) are NaN with invalid signal.
+ * Note about jn(n,x), yn(n,x):
+ *      For n=0, j0(x) is called,
+ *      for n=1, j1(x) is called,
+ *      for n<=x, forward recursion is used starting
+ *      from values of j0(x) and j1(x).
+ *      for n>x, a continued fraction approximation to
+ *      j(n,x)/j(n-1,x) is evaluated and then backward
+ *      recursion is used starting from a supposed value
+ *      for j(n,x). The resulting value of j(0,x) is
+ *      compared with the actual value to correct the
+ *      supposed value of j(n,x).
+ *
+ *      yn(n,x) is similar in all respects, except
+ *      that forward recursion is used for all
+ *      values of n>1.
+ */
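+
+// ---- Editor's sketch (illustrative, not part of the original source) ----
+// The forward recurrence J(n+1,x) = (2n/x)*J(n,x) - J(n-1,x) described
+// above, written out directly; it is only numerically stable while n < x.
+// `j0`/`j1` are the order-0/1 routines imported below.
+#[allow(dead_code)]
+fn jn_forward_sketch(n: usize, x: f64) -> f64 {
+    let (mut a, mut b) = (j0(x), j1(x)); // J(0,x), J(1,x)
+    for i in 1..n {
+        let next = (2.0 * i as f64 / x) * b - a; // J(i+1,x)
+        a = b;
+        b = next;
+    }
+    if n == 0 { a } else { b }
+}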
+
+use super::{cos, fabs, get_high_word, get_low_word, j0, j1, log, sin, sqrt, y0, y1};
+
+const INVSQRTPI: f64 = 5.64189583547756279280e-01; /* 0x3FE20DD7, 0x50429B6D */
+
+pub fn jn(n: i32, mut x: f64) -> f64 {
+    let mut ix: u32;
+    let lx: u32;
+    let nm1: i32;
+    let mut i: i32;
+    let mut sign: bool;
+    let mut a: f64;
+    let mut b: f64;
+    let mut temp: f64;
+
+    ix = get_high_word(x);
+    lx = get_low_word(x);
+    sign = (ix >> 31) != 0;
+    ix &= 0x7fffffff;
+
+    // -lx == !lx + 1
+    if (ix | (lx | ((!lx).wrapping_add(1))) >> 31) > 0x7ff00000 {
+        /* nan */
+        return x;
+    }
+
+    /* J(-n,x) = (-1)^n * J(n, x), J(n, -x) = (-1)^n * J(n, x)
+     * Thus, J(-n,x) = J(n,-x)
+     */
+    /* nm1 = |n|-1 is used instead of |n| to handle n==INT_MIN */
+    if n == 0 {
+        return j0(x);
+    }
+    if n < 0 {
+        nm1 = -(n + 1);
+        x = -x;
+        sign = !sign;
+    } else {
+        nm1 = n - 1;
+    }
+    if nm1 == 0 {
+        return j1(x);
+    }
+
+    sign &= (n & 1) != 0; /* even n: 0, odd n: signbit(x) */
+    x = fabs(x);
+    if (ix | lx) == 0 || ix == 0x7ff00000 {
+        /* if x is 0 or inf */
+        b = 0.0;
+    } else if (nm1 as f64) < x {
+        /* Safe to use J(n+1,x)=2n/x *J(n,x)-J(n-1,x) */
+        if ix >= 0x52d00000 {
+            /* x > 2**302 */
+            /* (x >> n**2)
+             *      Jn(x) = cos(x-(2n+1)*pi/4)*sqrt(2/x*pi)
+             *      Yn(x) = sin(x-(2n+1)*pi/4)*sqrt(2/x*pi)
+             *      Let s=sin(x), c=cos(x),
+             *          xn=x-(2n+1)*pi/4, sqt2 = sqrt(2),then
+             *
+             *             n    sin(xn)*sqt2    cos(xn)*sqt2
+             *          ----------------------------------
+             *             0     s-c             c+s
+             *             1    -s-c            -c+s
+             *             2    -s+c            -c-s
+             *             3     s+c             c-s
+             */
+            temp = match nm1 & 3 {
+                0 => -cos(x) + sin(x),
+                1 => -cos(x) - sin(x),
+                2 => cos(x) - sin(x),
+                3 | _ => cos(x) + sin(x),
+            };
+            b = INVSQRTPI * temp / sqrt(x);
+        } else {
+            a = j0(x);
+            b = j1(x);
+            i = 0;
+            while i < nm1 {
+                i += 1;
+                temp = b;
+                b = b * (2.0 * (i as f64) / x) - a; /* avoid underflow */
+                a = temp;
+            }
+        }
+    } else {
+        if ix < 0x3e100000 {
+            /* x < 2**-29 */
+            /* x is tiny, return the first Taylor expansion of J(n,x)
+             * J(n,x) = 1/n!*(x/2)^n  - ...
+             */
+            if nm1 > 32 {
+                /* underflow */
+                b = 0.0;
+            } else {
+                temp = x * 0.5;
+                b = temp;
+                a = 1.0;
+                i = 2;
+                while i <= nm1 + 1 {
+                    a *= i as f64; /* a = n! */
+                    b *= temp; /* b = (x/2)^n */
+                    i += 1;
+                }
+                b = b / a;
+            }
+        } else {
+            /* use backward recurrence */
+            /*                      x      x^2      x^2
+             *  J(n,x)/J(n-1,x) =  ----   ------   ------   .....
+             *                      2n  - 2(n+1) - 2(n+2)
+             *
+             *                      1      1        1
+             *  (for large x)   =  ----  ------   ------   .....
+             *                      2n   2(n+1)   2(n+2)
+             *                      -- - ------ - ------ -
+             *                       x     x         x
+             *
+             * Let w = 2n/x and h=2/x, then the above quotient
+             * is equal to the continued fraction:
+             *                  1
+             *      = -----------------------
+             *                     1
+             *         w - -----------------
+             *                        1
+             *              w+h - ---------
+             *                     w+2h - ...
+             *
+             * To determine how many terms needed, let
+             * Q(0) = w, Q(1) = w(w+h) - 1,
+             * Q(k) = (w+k*h)*Q(k-1) - Q(k-2),
+             * When Q(k) > 1e4      good for single
+             * When Q(k) > 1e9      good for double
+             * When Q(k) > 1e17     good for quadruple
+             */
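+            /* Editor's note (worked example, not in the original source):
+             * for n = 10, x = 1.0: w = 20, h = 2, and Q runs
+             * 20, 439, 10516, 272977, 7.63e6, 2.29e8, 7.31e9,
+             * so k = 6 extra terms already satisfy Q(k) > 1e9 for double.
+             */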
+            /* determine k */
+            let mut t: f64;
+            let mut q0: f64;
+            let mut q1: f64;
+            let mut w: f64;
+            let h: f64;
+            let mut z: f64;
+            let mut tmp: f64;
+            let nf: f64;
+
+            let mut k: i32;
+
+            nf = (nm1 as f64) + 1.0;
+            w = 2.0 * nf / x;
+            h = 2.0 / x;
+            z = w + h;
+            q0 = w;
+            q1 = w * z - 1.0;
+            k = 1;
+            while q1 < 1.0e9 {
+                k += 1;
+                z += h;
+                tmp = z * q1 - q0;
+                q0 = q1;
+                q1 = tmp;
+            }
+            t = 0.0;
+            i = k;
+            while i >= 0 {
+                t = 1.0 / (2.0 * ((i as f64) + nf) / x - t);
+                i -= 1;
+            }
+            a = t;
+            b = 1.0;
+            /*  estimate log((2/x)^n*n!) = n*log(2/x)+n*ln(n)
+             *  Hence, if n*(log(2n/x)) > ...
+             *  single 8.8722839355e+01
+             *  double 7.09782712893383973096e+02
+             *  long double 1.1356523406294143949491931077970765006170e+04
+ *  then the recurrent value may overflow and the result is
+ *  likely to underflow to zero
+             */
+            tmp = nf * log(fabs(w));
+            if tmp < 7.09782712893383973096e+02 {
+                i = nm1;
+                while i > 0 {
+                    temp = b;
+                    b = b * (2.0 * (i as f64)) / x - a;
+                    a = temp;
+                    i -= 1;
+                }
+            } else {
+                i = nm1;
+                while i > 0 {
+                    temp = b;
+                    b = b * (2.0 * (i as f64)) / x - a;
+                    a = temp;
+                    /* scale b to avoid spurious overflow */
+                    let x1p500 = f64::from_bits(0x5f30000000000000); // 0x1p500 == 2^500
+                    if b > x1p500 {
+                        a /= b;
+                        t /= b;
+                        b = 1.0;
+                    }
+                    i -= 1;
+                }
+            }
+            z = j0(x);
+            w = j1(x);
+            if fabs(z) >= fabs(w) {
+                b = t * z / b;
+            } else {
+                b = t * w / a;
+            }
+        }
+    }
+
+    if sign {
+        -b
+    } else {
+        b
+    }
+}
+
+pub fn yn(n: i32, x: f64) -> f64 {
+    let mut ix: u32;
+    let lx: u32;
+    let mut ib: u32;
+    let nm1: i32;
+    let mut sign: bool;
+    let mut i: i32;
+    let mut a: f64;
+    let mut b: f64;
+    let mut temp: f64;
+
+    ix = get_high_word(x);
+    lx = get_low_word(x);
+    sign = (ix >> 31) != 0;
+    ix &= 0x7fffffff;
+
+    // -lx == !lx + 1
+    if (ix | (lx | ((!lx).wrapping_add(1))) >> 31) > 0x7ff00000 {
+        /* nan */
+        return x;
+    }
+    if sign && (ix | lx) != 0 {
+        /* x < 0 */
+        return 0.0 / 0.0;
+    }
+    if ix == 0x7ff00000 {
+        return 0.0;
+    }
+
+    if n == 0 {
+        return y0(x);
+    }
+    if n < 0 {
+        nm1 = -(n + 1);
+        sign = (n & 1) != 0;
+    } else {
+        nm1 = n - 1;
+        sign = false;
+    }
+    if nm1 == 0 {
+        if sign {
+            return -y1(x);
+        } else {
+            return y1(x);
+        }
+    }
+
+    if ix >= 0x52d00000 {
+        /* x > 2**302 */
+        /* (x >> n**2)
+         *      Jn(x) = cos(x-(2n+1)*pi/4)*sqrt(2/x*pi)
+         *      Yn(x) = sin(x-(2n+1)*pi/4)*sqrt(2/x*pi)
+         *      Let s=sin(x), c=cos(x),
+         *          xn=x-(2n+1)*pi/4, sqt2 = sqrt(2),then
+         *
+         *             n    sin(xn)*sqt2    cos(xn)*sqt2
+         *          ----------------------------------
+         *             0     s-c             c+s
+         *             1    -s-c            -c+s
+         *             2    -s+c            -c-s
+         *             3     s+c             c-s
+         */
+        temp = match nm1 & 3 {
+            0 => -sin(x) - cos(x),
+            1 => -sin(x) + cos(x),
+            2 => sin(x) + cos(x),
+            3 | _ => sin(x) - cos(x),
+        };
+        b = INVSQRTPI * temp / sqrt(x);
+    } else {
+        a = y0(x);
+        b = y1(x);
+        /* quit if b is -inf */
+        ib = get_high_word(b);
+        i = 0;
+        while i < nm1 && ib != 0xfff00000 {
+            i += 1;
+            temp = b;
+            b = (2.0 * (i as f64) / x) * b - a;
+            ib = get_high_word(b);
+            a = temp;
+        }
+    }
+
+    if sign {
+        -b
+    } else {
+        b
+    }
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/jnf.rs.html b/src/libm/math/jnf.rs.html new file mode 100644 index 000000000..ceab438d2 --- /dev/null +++ b/src/libm/math/jnf.rs.html @@ -0,0 +1,521 @@ +jnf.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/e_jnf.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::{fabsf, j0f, j1f, logf, y0f, y1f};
+
+pub fn jnf(n: i32, mut x: f32) -> f32 {
+    let mut ix: u32;
+    let mut nm1: i32;
+    let mut sign: bool;
+    let mut i: i32;
+    let mut a: f32;
+    let mut b: f32;
+    let mut temp: f32;
+
+    ix = x.to_bits();
+    sign = (ix >> 31) != 0;
+    ix &= 0x7fffffff;
+    if ix > 0x7f800000 {
+        /* nan */
+        return x;
+    }
+
+    /* J(-n,x) = J(n,-x), use |n|-1 to avoid overflow in -n */
+    if n == 0 {
+        return j0f(x);
+    }
+    if n < 0 {
+        nm1 = -(n + 1);
+        x = -x;
+        sign = !sign;
+    } else {
+        nm1 = n - 1;
+    }
+    if nm1 == 0 {
+        return j1f(x);
+    }
+
+    sign &= (n & 1) != 0; /* even n: 0, odd n: signbit(x) */
+    x = fabsf(x);
+    if ix == 0 || ix == 0x7f800000 {
+        /* if x is 0 or inf */
+        b = 0.0;
+    } else if (nm1 as f32) < x {
+        /* Safe to use J(n+1,x)=2n/x *J(n,x)-J(n-1,x) */
+        a = j0f(x);
+        b = j1f(x);
+        i = 0;
+        while i < nm1 {
+            i += 1;
+            temp = b;
+            b = b * (2.0 * (i as f32) / x) - a;
+            a = temp;
+        }
+    } else {
+        if ix < 0x35800000 {
+            /* x < 2**-20 */
+            /* x is tiny, return the first Taylor expansion of J(n,x)
+             * J(n,x) = 1/n!*(x/2)^n  - ...
+             */
+            if nm1 > 8 {
+                /* underflow */
+                nm1 = 8;
+            }
+            temp = 0.5 * x;
+            b = temp;
+            a = 1.0;
+            i = 2;
+            while i <= nm1 + 1 {
+                a *= i as f32; /* a = n! */
+                b *= temp; /* b = (x/2)^n */
+                i += 1;
+            }
+            b = b / a;
+        } else {
+            /* use backward recurrence */
+            /*                      x      x^2      x^2
+             *  J(n,x)/J(n-1,x) =  ----   ------   ------   .....
+             *                      2n  - 2(n+1) - 2(n+2)
+             *
+             *                      1      1        1
+             *  (for large x)   =  ----  ------   ------   .....
+             *                      2n   2(n+1)   2(n+2)
+             *                      -- - ------ - ------ -
+             *                       x     x         x
+             *
+             * Let w = 2n/x and h=2/x, then the above quotient
+             * is equal to the continued fraction:
+             *                  1
+             *      = -----------------------
+             *                     1
+             *         w - -----------------
+             *                        1
+             *              w+h - ---------
+             *                     w+2h - ...
+             *
+             * To determine how many terms needed, let
+             * Q(0) = w, Q(1) = w(w+h) - 1,
+             * Q(k) = (w+k*h)*Q(k-1) - Q(k-2),
+             * When Q(k) > 1e4      good for single
+             * When Q(k) > 1e9      good for double
+             * When Q(k) > 1e17     good for quadruple
+             */
+            /* determine k */
+            let mut t: f32;
+            let mut q0: f32;
+            let mut q1: f32;
+            let mut w: f32;
+            let h: f32;
+            let mut z: f32;
+            let mut tmp: f32;
+            let nf: f32;
+            let mut k: i32;
+
+            nf = (nm1 as f32) + 1.0;
+            w = 2.0 * nf / x;
+            h = 2.0 / x;
+            z = w + h;
+            q0 = w;
+            q1 = w * z - 1.0;
+            k = 1;
+            while q1 < 1.0e4 {
+                k += 1;
+                z += h;
+                tmp = z * q1 - q0;
+                q0 = q1;
+                q1 = tmp;
+            }
+            t = 0.0;
+            i = k;
+            while i >= 0 {
+                t = 1.0 / (2.0 * ((i as f32) + nf) / x - t);
+                i -= 1;
+            }
+            a = t;
+            b = 1.0;
+            /*  estimate log((2/x)^n*n!) = n*log(2/x)+n*ln(n)
+             *  Hence, if n*(log(2n/x)) > ...
+             *  single 8.8722839355e+01
+             *  double 7.09782712893383973096e+02
+             *  long double 1.1356523406294143949491931077970765006170e+04
+ *  then the recurrent value may overflow and the result is
+ *  likely to underflow to zero
+             */
+            tmp = nf * logf(fabsf(w));
+            if tmp < 88.721679688 {
+                i = nm1;
+                while i > 0 {
+                    temp = b;
+                    b = 2.0 * (i as f32) * b / x - a;
+                    a = temp;
+                    i -= 1;
+                }
+            } else {
+                i = nm1;
+                while i > 0 {
+                    temp = b;
+                    b = 2.0 * (i as f32) * b / x - a;
+                    a = temp;
+                    /* scale b to avoid spurious overflow */
+                    let x1p60 = f32::from_bits(0x5d800000); // 0x1p60 == 2^60
+                    if b > x1p60 {
+                        a /= b;
+                        t /= b;
+                        b = 1.0;
+                    }
+                    i -= 1;
+                }
+            }
+            z = j0f(x);
+            w = j1f(x);
+            if fabsf(z) >= fabsf(w) {
+                b = t * z / b;
+            } else {
+                b = t * w / a;
+            }
+        }
+    }
+
+    if sign {
+        -b
+    } else {
+        b
+    }
+}
+
+pub fn ynf(n: i32, x: f32) -> f32 {
+    let mut ix: u32;
+    let mut ib: u32;
+    let nm1: i32;
+    let mut sign: bool;
+    let mut i: i32;
+    let mut a: f32;
+    let mut b: f32;
+    let mut temp: f32;
+
+    ix = x.to_bits();
+    sign = (ix >> 31) != 0;
+    ix &= 0x7fffffff;
+    if ix > 0x7f800000 {
+        /* nan */
+        return x;
+    }
+    if sign && ix != 0 {
+        /* x < 0 */
+        return 0.0 / 0.0;
+    }
+    if ix == 0x7f800000 {
+        return 0.0;
+    }
+
+    if n == 0 {
+        return y0f(x);
+    }
+    if n < 0 {
+        nm1 = -(n + 1);
+        sign = (n & 1) != 0;
+    } else {
+        nm1 = n - 1;
+        sign = false;
+    }
+    if nm1 == 0 {
+        if sign {
+            return -y1f(x);
+        } else {
+            return y1f(x);
+        }
+    }
+
+    a = y0f(x);
+    b = y1f(x);
+    /* quit if b is -inf */
+    ib = b.to_bits();
+    i = 0;
+    while i < nm1 && ib != 0xff800000 {
+        i += 1;
+        temp = b;
+        b = (2.0 * (i as f32) / x) * b - a;
+        ib = b.to_bits();
+        a = temp;
+    }
+
+    if sign {
+        -b
+    } else {
+        b
+    }
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/k_cos.rs.html b/src/libm/math/k_cos.rs.html new file mode 100644 index 000000000..adac7eed1 --- /dev/null +++ b/src/libm/math/k_cos.rs.html @@ -0,0 +1,129 @@ +k_cos.rs.html -- source
+
+// origin: FreeBSD /usr/src/lib/msun/src/k_cos.c
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunSoft, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+
+const C1: f64 = 4.16666666666666019037e-02; /* 0x3FA55555, 0x5555554C */
+const C2: f64 = -1.38888888888741095749e-03; /* 0xBF56C16C, 0x16C15177 */
+const C3: f64 = 2.48015872894767294178e-05; /* 0x3EFA01A0, 0x19CB1590 */
+const C4: f64 = -2.75573143513906633035e-07; /* 0xBE927E4F, 0x809C52AD */
+const C5: f64 = 2.08757232129817482790e-09; /* 0x3E21EE9E, 0xBDB4B1C4 */
+const C6: f64 = -1.13596475577881948265e-11; /* 0xBDA8FAE9, 0xBE8838D4 */
+
+// kernel cos function on [-pi/4, pi/4], pi/4 ~ 0.785398164
+// Input x is assumed to be bounded by ~pi/4 in magnitude.
+// Input y is the tail of x.
+//
+// Algorithm
+//      1. Since cos(-x) = cos(x), we need only to consider positive x.
+//      2. if x < 2^-27 (hx < 0x3e400000), return 1 with inexact if x != 0.
+//      3. cos(x) is approximated by a polynomial of degree 14 on
+//         [0,pi/4]
+//                                       4            14
+//              cos(x) ~ 1 - x*x/2 + C1*x + ... + C6*x
+//         where the remez error is
+//
+//      |              2     4     6     8     10    12     14 |     -58
+//      |cos(x)-(1-.5*x +C1*x +C2*x +C3*x +C4*x +C5*x  +C6*x  )| <= 2
+//      |                                                      |
+//
+//                     4     6     8     10    12     14
+//      4. let r = C1*x +C2*x +C3*x +C4*x +C5*x  +C6*x  , then
+//             cos(x) ~ 1 - x*x/2 + r
+//         since cos(x+y) ~ cos(x) - sin(x)*y
+//                        ~ cos(x) - x*y,
+//         a correction term is necessary in cos(x) and hence
+//              cos(x+y) = 1 - (x*x/2 - (r - x*y))
+//         For better accuracy, rearrange to
+//              cos(x+y) ~ w + (tmp + (r-x*y))
+//         where w = 1 - x*x/2 and tmp is a tiny correction term
+//         (1 - x*x/2 == w + tmp exactly in infinite precision).
+//         The exactness of w + tmp in infinite precision depends on w
+//         and tmp having the same precision as x.  If they have extra
+//         precision due to compiler bugs, then the extra precision is
+//         only good provided it is retained in all terms of the final
+//         expression for cos().  Retention happens in all cases tested
+//         under FreeBSD, so don't pessimize things by forcibly clipping
+//         any extra precision in w.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub(crate) fn k_cos(x: f64, y: f64) -> f64 {
+    let z = x * x;
+    let w = z * z;
+    let r = z * (C1 + z * (C2 + z * C3)) + w * w * (C4 + z * (C5 + z * C6));
+    let hz = 0.5 * z;
+    let w = 1.0 - hz;
+    w + (((1.0 - w) - hz) + (z * r - x * y))
+}
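+
+// ---- Editor's sketch (illustrative, not part of the original source) ----
+// k_cos is only valid on ~[-pi/4, pi/4]; the public `cos` reduces its
+// argument first. For an already-reduced argument the tail y is simply 0:
+#[cfg(test)]
+#[test]
+fn k_cos_small_arg() {
+    assert!((k_cos(0.5, 0.0) - 0.5f64.cos()).abs() < 1e-15);
+}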
+
+
\ No newline at end of file diff --git a/src/libm/math/k_cosf.rs.html b/src/libm/math/k_cosf.rs.html new file mode 100644 index 000000000..252aceca3 --- /dev/null +++ b/src/libm/math/k_cosf.rs.html @@ -0,0 +1,63 @@ +k_cosf.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/k_cosf.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ * Debugged and optimized by Bruce D. Evans.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* |cos(x) - c(x)| < 2**-34.1 (~[-5.37e-11, 5.295e-11]). */
+const C0: f64 = -0.499999997251031003120; /* -0x1ffffffd0c5e81.0p-54 */
+const C1: f64 = 0.0416666233237390631894; /*  0x155553e1053a42.0p-57 */
+const C2: f64 = -0.00138867637746099294692; /* -0x16c087e80f1e27.0p-62 */
+const C3: f64 = 0.0000243904487962774090654; /*  0x199342e0ee5069.0p-68 */
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub(crate) fn k_cosf(x: f64) -> f32 {
+    let z = x * x;
+    let w = z * z;
+    let r = C2 + z * C3;
+    (((1.0 + z * C0) + w * C1) + (w * z) * r) as f32
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/k_expo2.rs.html b/src/libm/math/k_expo2.rs.html new file mode 100644 index 000000000..f69182657 --- /dev/null +++ b/src/libm/math/k_expo2.rs.html @@ -0,0 +1,33 @@ +k_expo2.rs.html -- source
+
+use super::exp;
+
+/* k is such that k*ln2 has minimal relative error and x - kln2 > log(DBL_MIN) */
+const K: i32 = 2043;
+
+/* exp(x)/2 for x >= log(DBL_MAX), slightly better than 0.5*exp(x/2)*exp(x/2) */
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub(crate) fn k_expo2(x: f64) -> f64 {
+    let k_ln2 = f64::from_bits(0x40962066151add8b);
+    /* note that k is odd and scale*scale overflows */
+    let scale = f64::from_bits(((((0x3ff + K / 2) as u32) << 20) as u64) << 32);
+    /* exp(x - k ln2) * 2**(k-1) */
+    exp(x - k_ln2) * scale * scale
+}
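+
+// Editor's note (illustrative, not part of the original source): this is
+// useful in the narrow band where exp(x) itself would overflow but exp(x)/2
+// is still finite, e.g. x = 710: exp(710) > f64::MAX, while
+// k_expo2(710.0) ~ 1.12e308 is representable (as needed by cosh/sinh for
+// large arguments).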
+
+
\ No newline at end of file diff --git a/src/libm/math/k_expo2f.rs.html b/src/libm/math/k_expo2f.rs.html new file mode 100644 index 000000000..e222fd26e --- /dev/null +++ b/src/libm/math/k_expo2f.rs.html @@ -0,0 +1,33 @@ +k_expo2f.rs.html -- source
+
+use super::expf;
+
+/* k is such that k*ln2 has minimal relative error and x - kln2 > log(FLT_MIN) */
+const K: i32 = 235;
+
+/* expf(x)/2 for x >= log(FLT_MAX), slightly better than 0.5f*expf(x/2)*expf(x/2) */
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub(crate) fn k_expo2f(x: f32) -> f32 {
+    let k_ln2 = f32::from_bits(0x4322e3bc);
+    /* note that k is odd and scale*scale overflows */
+    let scale = f32::from_bits(((0x7f + K / 2) as u32) << 23);
+    /* exp(x - k ln2) * 2**(k-1) */
+    expf(x - k_ln2) * scale * scale
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/k_sin.rs.html b/src/libm/math/k_sin.rs.html new file mode 100644 index 000000000..d9b3b5337 --- /dev/null +++ b/src/libm/math/k_sin.rs.html @@ -0,0 +1,119 @@ +k_sin.rs.html -- source
+
+// origin: FreeBSD /usr/src/lib/msun/src/k_sin.c
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunSoft, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+
+const S1: f64 = -1.66666666666666324348e-01; /* 0xBFC55555, 0x55555549 */
+const S2: f64 = 8.33333333332248946124e-03; /* 0x3F811111, 0x1110F8A6 */
+const S3: f64 = -1.98412698298579493134e-04; /* 0xBF2A01A0, 0x19C161D5 */
+const S4: f64 = 2.75573137070700676789e-06; /* 0x3EC71DE3, 0x57B1FE7D */
+const S5: f64 = -2.50507602534068634195e-08; /* 0xBE5AE5E6, 0x8A2B9CEB */
+const S6: f64 = 1.58969099521155010221e-10; /* 0x3DE5D93A, 0x5ACFD57C */
+
+// kernel sin function on ~[-pi/4, pi/4] (except on -0), pi/4 ~ 0.7854
+// Input x is assumed to be bounded by ~pi/4 in magnitude.
+// Input y is the tail of x.
+// Input iy indicates whether y is 0 (if iy = 0, y is assumed to be 0).
+//
+// Algorithm
+//      1. Since sin(-x) = -sin(x), we need only to consider positive x.
+//      2. Callers must return sin(-0) = -0 without calling here since our
+//         odd polynomial is not evaluated in a way that preserves -0.
+//         Callers may do the optimization sin(x) ~ x for tiny x.
+//      3. sin(x) is approximated by a polynomial of degree 13 on
+//         [0,pi/4]
+//                               3            13
+//              sin(x) ~ x + S1*x + ... + S6*x
+//         where
+//
+//      |sin(x)         2     4     6     8     10     12  |     -58
+//      |----- - (1+S1*x +S2*x +S3*x +S4*x +S5*x  +S6*x   )| <= 2
+//      |  x                                               |
+//
+//      4. sin(x+y) = sin(x) + sin'(x')*y
+//                  ~ sin(x) + (1-x*x/2)*y
+//         For better accuracy, let
+//                   3      2      2      2      2
+//              r = x *(S2+x *(S3+x *(S4+x *(S5+x *S6))))
+//         then                   3    2
+//              sin(x) = x + (S1*x + (x *(r-y/2)+y))
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub(crate) fn k_sin(x: f64, y: f64, iy: i32) -> f64 {
+    let z = x * x;
+    let w = z * z;
+    let r = S2 + z * (S3 + z * S4) + z * w * (S5 + z * S6);
+    let v = z * x;
+    if iy == 0 {
+        x + v * (S1 + z * r)
+    } else {
+        x - ((z * (0.5 * y - v * r) - y) - v * S1)
+    }
+}
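+
+// ---- Editor's sketch (illustrative, not part of the original source) ----
+// As with k_cos, callers reduce the argument first; for |x| < pi/4 the
+// tail is zero and iy = 0 marks it as exactly zero:
+#[cfg(test)]
+#[test]
+fn k_sin_small_arg() {
+    assert!((k_sin(0.5, 0.0, 0) - 0.5f64.sin()).abs() < 1e-15);
+}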
+
+
\ No newline at end of file diff --git a/src/libm/math/k_sinf.rs.html b/src/libm/math/k_sinf.rs.html new file mode 100644 index 000000000..5968e3c8b --- /dev/null +++ b/src/libm/math/k_sinf.rs.html @@ -0,0 +1,65 @@ +k_sinf.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/k_sinf.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ * Optimized by Bruce D. Evans.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* |sin(x)/x - s(x)| < 2**-37.5 (~[-4.89e-12, 4.824e-12]). */
+const S1: f64 = -0.166666666416265235595; /* -0x15555554cbac77.0p-55 */
+const S2: f64 = 0.0083333293858894631756; /*  0x111110896efbb2.0p-59 */
+const S3: f64 = -0.000198393348360966317347; /* -0x1a00f9e2cae774.0p-65 */
+const S4: f64 = 0.0000027183114939898219064; /*  0x16cd878c3b46a7.0p-71 */
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub(crate) fn k_sinf(x: f64) -> f32 {
+    let z = x * x;
+    let w = z * z;
+    let r = S3 + z * S4;
+    let s = z * x;
+    ((x + s * (S1 + z * S2)) + s * w * r) as f32
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/k_tan.rs.html b/src/libm/math/k_tan.rs.html new file mode 100644 index 000000000..e5f639bd1 --- /dev/null +++ b/src/libm/math/k_tan.rs.html @@ -0,0 +1,217 @@ +k_tan.rs.html -- source
+
+// origin: FreeBSD /usr/src/lib/msun/src/k_tan.c
+//
+// ====================================================
+// Copyright 2004 Sun Microsystems, Inc.  All Rights Reserved.
+//
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+
+// kernel tan function on ~[-pi/4, pi/4] (except on -0), pi/4 ~ 0.7854
+// Input x is assumed to be bounded by ~pi/4 in magnitude.
+// Input y is the tail of x.
+// Input odd indicates whether tan (if odd = 0) or -1/tan (if odd = 1) is returned.
+//
+// Algorithm
+//      1. Since tan(-x) = -tan(x), we need only to consider positive x.
+//      2. Callers must return tan(-0) = -0 without calling here since our
+//         odd polynomial is not evaluated in a way that preserves -0.
+//         Callers may do the optimization tan(x) ~ x for tiny x.
+//      3. tan(x) is approximated by an odd polynomial of degree 27 on
+//         [0,0.67434]
+//                               3             27
+//              tan(x) ~ x + T1*x + ... + T13*x
+//         where
+//
+//              |tan(x)         2     4            26   |     -59.2
+//              |----- - (1+T1*x +T2*x +.... +T13*x    )| <= 2
+//              |  x                                    |
+//
+//         Note: tan(x+y) = tan(x) + tan'(x)*y
+//                        ~ tan(x) + (1+x*x)*y
+//         Therefore, for better accuracy in computing tan(x+y), let
+//                   3      2      2       2       2
+//              r = x *(T2+x *(T3+x *(...+x *(T12+x *T13))))
+//         then
+//                                  3    2
+//              tan(x+y) = x + (T1*x + (x *(r+y)+y))
+//
+//      4. For x in [0.67434,pi/4],  let y = pi/4 - x, then
+//              tan(x) = tan(pi/4-y) = (1-tan(y))/(1+tan(y))
+//                     = 1 - 2*(tan(y) - (tan(y)^2)/(1+tan(y)))
+static T: [f64; 13] = [
+    3.33333333333334091986e-01,  /* 3FD55555, 55555563 */
+    1.33333333333201242699e-01,  /* 3FC11111, 1110FE7A */
+    5.39682539762260521377e-02,  /* 3FABA1BA, 1BB341FE */
+    2.18694882948595424599e-02,  /* 3F9664F4, 8406D637 */
+    8.86323982359930005737e-03,  /* 3F8226E3, E96E8493 */
+    3.59207910759131235356e-03,  /* 3F6D6D22, C9560328 */
+    1.45620945432529025516e-03,  /* 3F57DBC8, FEE08315 */
+    5.88041240820264096874e-04,  /* 3F4344D8, F2F26501 */
+    2.46463134818469906812e-04,  /* 3F3026F7, 1A8D1068 */
+    7.81794442939557092300e-05,  /* 3F147E88, A03792A6 */
+    7.14072491382608190305e-05,  /* 3F12B80F, 32F0A7E9 */
+    -1.85586374855275456654e-05, /* BEF375CB, DB605373 */
+    2.59073051863633712884e-05,  /* 3EFB2A70, 74BF7AD4 */
+];
+const PIO4: f64 = 7.85398163397448278999e-01; /* 3FE921FB, 54442D18 */
+const PIO4_LO: f64 = 3.06161699786838301793e-17; /* 3C81A626, 33145C07 */
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub(crate) fn k_tan(mut x: f64, mut y: f64, odd: i32) -> f64 {
+    let hx = (f64::to_bits(x) >> 32) as u32;
+    let big = (hx & 0x7fffffff) >= 0x3FE59428; /* |x| >= 0.6744 */
+    if big {
+        let sign = hx >> 31;
+        if sign != 0 {
+            x = -x;
+            y = -y;
+        }
+        x = (PIO4 - x) + (PIO4_LO - y);
+        y = 0.0;
+    }
+    let z = x * x;
+    let w = z * z;
+    /*
+     * Break x^5*(T[1]+x^2*T[2]+...) into
+     * x^5(T[1]+x^4*T[3]+...+x^20*T[11]) +
+     * x^5(x^2*(T[2]+x^4*T[4]+...+x^22*T[12]))
+     */
+    let r = T[1] + w * (T[3] + w * (T[5] + w * (T[7] + w * (T[9] + w * T[11]))));
+    let v = z * (T[2] + w * (T[4] + w * (T[6] + w * (T[8] + w * (T[10] + w * T[12])))));
+    let s = z * x;
+    let r = y + z * (s * (r + v) + y) + s * T[0];
+    let w = x + r;
+    if big {
+        let sign = hx >> 31;
+        let s = 1.0 - 2.0 * odd as f64;
+        let v = s - 2.0 * (x + (r - w * w / (w + s)));
+        return if sign != 0 { -v } else { v };
+    }
+    if odd == 0 {
+        return w;
+    }
+    /* -1.0/(x+r) has up to 2ulp error, so compute it accurately */
+    let w0 = zero_low_word(w);
+    let v = r - (w0 - x); /* w0+v = r+x */
+    let a = -1.0 / w;
+    let a0 = zero_low_word(a);
+    a0 + a * (1.0 + a0 * w0 + a0 * v)
+}
+
+#[inline]
+fn zero_low_word(x: f64) -> f64 {
+    f64::from_bits(f64::to_bits(x) & 0xFFFF_FFFF_0000_0000)
+}
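+
+// ---- Editor's sketch (illustrative, not part of the original source) ----
+// odd = 0 returns tan(x); odd = 1 returns -1/tan(x) for the quadrants where
+// the tangent is computed from the cotangent:
+#[cfg(test)]
+#[test]
+fn k_tan_small_arg() {
+    let t = k_tan(0.5, 0.0, 0);
+    assert!((t - 0.5f64.tan()).abs() < 1e-14);
+    let c = k_tan(0.5, 0.0, 1);
+    assert!((c + 1.0 / 0.5f64.tan()).abs() < 1e-13);
+}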
+
+
\ No newline at end of file diff --git a/src/libm/math/k_tanf.rs.html b/src/libm/math/k_tanf.rs.html new file mode 100644 index 000000000..cda78bc7d --- /dev/null +++ b/src/libm/math/k_tanf.rs.html @@ -0,0 +1,97 @@ +k_tanf.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/k_tan.c */
+/*
+ * ====================================================
+ * Copyright 2004 Sun Microsystems, Inc.  All Rights Reserved.
+ *
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* |tan(x)/x - t(x)| < 2**-25.5 (~[-2e-08, 2e-08]). */
+const T: [f64; 6] = [
+    0.333331395030791399758,   /* 0x15554d3418c99f.0p-54 */
+    0.133392002712976742718,   /* 0x1112fd38999f72.0p-55 */
+    0.0533812378445670393523,  /* 0x1b54c91d865afe.0p-57 */
+    0.0245283181166547278873,  /* 0x191df3908c33ce.0p-58 */
+    0.00297435743359967304927, /* 0x185dadfcecf44e.0p-61 */
+    0.00946564784943673166728, /* 0x1362b9bf971bcd.0p-59 */
+];
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub(crate) fn k_tanf(x: f64, odd: bool) -> f32 {
+    let z = x * x;
+    /*
+     * Split up the polynomial into small independent terms to give
+     * opportunities for parallel evaluation.  The chosen splitting is
+     * micro-optimized for Athlons (XP, X64).  It costs 2 multiplications
+     * relative to Horner's method on sequential machines.
+     *
+     * We add the small terms from lowest degree up for efficiency on
+     * non-sequential machines (the lowest degree terms tend to be ready
+     * earlier).  Apart from this, we don't care about order of
+     * operations, and don't need to care since we have precision to
+     * spare.  However, the chosen splitting is good for accuracy too,
+     * and would give results as accurate as Horner's method if the
+     * small terms were added from highest degree down.
+     */
+    let mut r = T[4] + z * T[5];
+    let t = T[2] + z * T[3];
+    let w = z * z;
+    let s = z * x;
+    let u = T[0] + z * T[1];
+    r = (x + s * u) + (s * w) * (t + w * r);
+    (if odd { -1. / r } else { r }) as f32
+}
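+
+// ---- Editor's sketch (illustrative, not part of the original source) ----
+// The polynomial is evaluated in f64, so for reduced arguments the f32
+// result should agree with the reference value to well under an f32 ulp:
+#[cfg(test)]
+#[test]
+fn k_tanf_small_arg() {
+    assert!((k_tanf(0.5, false) - 0.5f64.tan() as f32).abs() < 1e-6);
+}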
+
+
\ No newline at end of file diff --git a/src/libm/math/ldexp.rs.html b/src/libm/math/ldexp.rs.html new file mode 100644 index 000000000..2cf34024a --- /dev/null +++ b/src/libm/math/ldexp.rs.html @@ -0,0 +1,13 @@ +ldexp.rs.html -- source
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn ldexp(x: f64, n: i32) -> f64 {
+    super::scalbn(x, n)
+}
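+
+// Editor's note (illustrative, not part of the original source):
+// ldexp(x, n) computes x * 2^n, e.g. ldexp(0.75, 4) == 12.0; it simply
+// forwards to scalbn, which also handles the overflow/underflow cases.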
+
+
\ No newline at end of file diff --git a/src/libm/math/ldexpf.rs.html b/src/libm/math/ldexpf.rs.html new file mode 100644 index 000000000..084ae1c95 --- /dev/null +++ b/src/libm/math/ldexpf.rs.html @@ -0,0 +1,13 @@ +ldexpf.rs.html -- source
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn ldexpf(x: f32, n: i32) -> f32 {
+    super::scalbnf(x, n)
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/lgamma.rs.html b/src/libm/math/lgamma.rs.html new file mode 100644 index 000000000..5e399274b --- /dev/null +++ b/src/libm/math/lgamma.rs.html @@ -0,0 +1,13 @@ +lgamma.rs.html -- source
+
+use super::lgamma_r;
+
+pub fn lgamma(x: f64) -> f64 {
+    lgamma_r(x).0
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/lgamma_r.rs.html b/src/libm/math/lgamma_r.rs.html new file mode 100644 index 000000000..7b251bd07 --- /dev/null +++ b/src/libm/math/lgamma_r.rs.html @@ -0,0 +1,641 @@ +lgamma_r.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/e_lgamma_r.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+/* lgamma_r(x, signgamp)
+ * Reentrant version of the logarithm of the Gamma function
+ * with a user-provided pointer for the sign of Gamma(x).
+ *
+ * Method:
+ *   1. Argument Reduction for 0 < x <= 8
+ *      Since gamma(1+s)=s*gamma(s), for x in [0,8], we may
+ *      reduce x to a number in [1.5,2.5] by
+ *              lgamma(1+s) = log(s) + lgamma(s)
+ *      for example,
+ *              lgamma(7.3) = log(6.3) + lgamma(6.3)
+ *                          = log(6.3*5.3) + lgamma(5.3)
+ *                          = log(6.3*5.3*4.3*3.3*2.3) + lgamma(2.3)
+ *   2. Polynomial approximation of lgamma around its
+ *      minimum ymin=1.461632144968362245 to maintain monotonicity.
+ *      On [ymin-0.23, ymin+0.27] (i.e., [1.23164,1.73163]), use
+ *              Let z = x-ymin;
+ *              lgamma(x) = -1.214862905358496078218 + z^2*poly(z)
+ *      where
+ *              poly(z) is a degree-14 polynomial.
+ *   3. Rational approximation in the primary interval [2,3]
+ *      We use the following approximation:
+ *              s = x-2.0;
+ *              lgamma(x) = 0.5*s + s*P(s)/Q(s)
+ *      with accuracy
+ *              |P/Q - (lgamma(x)-0.5s)| < 2**-61.71
+ *      Our algorithms are based on the following observation
+ *
+ *                             zeta(2)-1    2    zeta(3)-1    3
+ * lgamma(2+s) = s*(1-Euler) + --------- * s  -  --------- * s  + ...
+ *                                 2                 3
+ *
+ *      where Euler = 0.5772... is the Euler constant, which is very
+ *      close to 0.5.
+ *
+ *   4. For x>=8, we have
+ *      lgamma(x)~(x-0.5)log(x)-x+0.5*log(2pi)+1/(12x)-1/(360x**3)+....
+ *      (better formula:
+ *         lgamma(x)~(x-0.5)*(log(x)-1)-.5*(log(2pi)-1) + ...)
+ *      Let z = 1/x; then we approximate
+ *              f(z) = lgamma(x) - (x-0.5)(log(x)-1)
+ *      by
+ *                                  3       5             11
+ *              w = w0 + w1*z + w2*z  + w3*z  + ... + w6*z
+ *      where
+ *              |w - f(z)| < 2**-58.74
+ *
+ *   5. For negative x, since (G is the gamma function)
+ *              -x*G(-x)*G(x) = PI/sin(PI*x),
+ *      we have
+ *              G(x) = PI/(sin(PI*x)*(-x)*G(-x))
+ *      since G(-x) is positive, sign(G(x)) = sign(sin(PI*x)) for x<0
+ *      Hence, for x<0, signgam = sign(sin(PI*x)) and
+ *              lgamma(x) = log(|Gamma(x)|)
+ *                        = log(PI/(|x*sin(PI*x)|)) - lgamma(-x);
+ *      Note: one should avoid computing PI*(-x) directly in the
+ *            computation of sin(PI*(-x)).
+ *
+ *   6. Special Cases
+ *              lgamma(2+s) ~ s*(1-Euler) for tiny s
+ *              lgamma(1) = lgamma(2) = 0
+ *              lgamma(x) ~ -log(|x|) for tiny x
+ *              lgamma(0) = lgamma(neg.integer) = inf and raise divide-by-zero
+ *              lgamma(inf) = inf
+ *              lgamma(-inf) = inf (bug for bug compatible with C99!?)
+ *
+ */
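+/* Editor's note (worked instance of step 1 above, not in the original
+ * comment):
+ *      lgamma(7.3) = log(6.3*5.3*4.3*3.3*2.3) + lgamma(2.3)
+ *                  ~ log(1089.75) + 0.15419
+ *                  ~ 6.99370 + 0.15419 ~ 7.14789
+ */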
+
+use super::{floor, k_cos, k_sin, log};
+
+const PI: f64 = 3.14159265358979311600e+00; /* 0x400921FB, 0x54442D18 */
+const A0: f64 = 7.72156649015328655494e-02; /* 0x3FB3C467, 0xE37DB0C8 */
+const A1: f64 = 3.22467033424113591611e-01; /* 0x3FD4A34C, 0xC4A60FAD */
+const A2: f64 = 6.73523010531292681824e-02; /* 0x3FB13E00, 0x1A5562A7 */
+const A3: f64 = 2.05808084325167332806e-02; /* 0x3F951322, 0xAC92547B */
+const A4: f64 = 7.38555086081402883957e-03; /* 0x3F7E404F, 0xB68FEFE8 */
+const A5: f64 = 2.89051383673415629091e-03; /* 0x3F67ADD8, 0xCCB7926B */
+const A6: f64 = 1.19270763183362067845e-03; /* 0x3F538A94, 0x116F3F5D */
+const A7: f64 = 5.10069792153511336608e-04; /* 0x3F40B6C6, 0x89B99C00 */
+const A8: f64 = 2.20862790713908385557e-04; /* 0x3F2CF2EC, 0xED10E54D */
+const A9: f64 = 1.08011567247583939954e-04; /* 0x3F1C5088, 0x987DFB07 */
+const A10: f64 = 2.52144565451257326939e-05; /* 0x3EFA7074, 0x428CFA52 */
+const A11: f64 = 4.48640949618915160150e-05; /* 0x3F07858E, 0x90A45837 */
+const TC: f64 = 1.46163214496836224576e+00; /* 0x3FF762D8, 0x6356BE3F */
+const TF: f64 = -1.21486290535849611461e-01; /* 0xBFBF19B9, 0xBCC38A42 */
+/* tt = -(tail of TF) */
+const TT: f64 = -3.63867699703950536541e-18; /* 0xBC50C7CA, 0xA48A971F */
+const T0: f64 = 4.83836122723810047042e-01; /* 0x3FDEF72B, 0xC8EE38A2 */
+const T1: f64 = -1.47587722994593911752e-01; /* 0xBFC2E427, 0x8DC6C509 */
+const T2: f64 = 6.46249402391333854778e-02; /* 0x3FB08B42, 0x94D5419B */
+const T3: f64 = -3.27885410759859649565e-02; /* 0xBFA0C9A8, 0xDF35B713 */
+const T4: f64 = 1.79706750811820387126e-02; /* 0x3F9266E7, 0x970AF9EC */
+const T5: f64 = -1.03142241298341437450e-02; /* 0xBF851F9F, 0xBA91EC6A */
+const T6: f64 = 6.10053870246291332635e-03; /* 0x3F78FCE0, 0xE370E344 */
+const T7: f64 = -3.68452016781138256760e-03; /* 0xBF6E2EFF, 0xB3E914D7 */
+const T8: f64 = 2.25964780900612472250e-03; /* 0x3F6282D3, 0x2E15C915 */
+const T9: f64 = -1.40346469989232843813e-03; /* 0xBF56FE8E, 0xBF2D1AF1 */
+const T10: f64 = 8.81081882437654011382e-04; /* 0x3F4CDF0C, 0xEF61A8E9 */
+const T11: f64 = -5.38595305356740546715e-04; /* 0xBF41A610, 0x9C73E0EC */
+const T12: f64 = 3.15632070903625950361e-04; /* 0x3F34AF6D, 0x6C0EBBF7 */
+const T13: f64 = -3.12754168375120860518e-04; /* 0xBF347F24, 0xECC38C38 */
+const T14: f64 = 3.35529192635519073543e-04; /* 0x3F35FD3E, 0xE8C2D3F4 */
+const U0: f64 = -7.72156649015328655494e-02; /* 0xBFB3C467, 0xE37DB0C8 */
+const U1: f64 = 6.32827064025093366517e-01; /* 0x3FE4401E, 0x8B005DFF */
+const U2: f64 = 1.45492250137234768737e+00; /* 0x3FF7475C, 0xD119BD6F */
+const U3: f64 = 9.77717527963372745603e-01; /* 0x3FEF4976, 0x44EA8450 */
+const U4: f64 = 2.28963728064692451092e-01; /* 0x3FCD4EAE, 0xF6010924 */
+const U5: f64 = 1.33810918536787660377e-02; /* 0x3F8B678B, 0xBF2BAB09 */
+const V1: f64 = 2.45597793713041134822e+00; /* 0x4003A5D7, 0xC2BD619C */
+const V2: f64 = 2.12848976379893395361e+00; /* 0x40010725, 0xA42B18F5 */
+const V3: f64 = 7.69285150456672783825e-01; /* 0x3FE89DFB, 0xE45050AF */
+const V4: f64 = 1.04222645593369134254e-01; /* 0x3FBAAE55, 0xD6537C88 */
+const V5: f64 = 3.21709242282423911810e-03; /* 0x3F6A5ABB, 0x57D0CF61 */
+const S0: f64 = -7.72156649015328655494e-02; /* 0xBFB3C467, 0xE37DB0C8 */
+const S1: f64 = 2.14982415960608852501e-01; /* 0x3FCB848B, 0x36E20878 */
+const S2: f64 = 3.25778796408930981787e-01; /* 0x3FD4D98F, 0x4F139F59 */
+const S3: f64 = 1.46350472652464452805e-01; /* 0x3FC2BB9C, 0xBEE5F2F7 */
+const S4: f64 = 2.66422703033638609560e-02; /* 0x3F9B481C, 0x7E939961 */
+const S5: f64 = 1.84028451407337715652e-03; /* 0x3F5E26B6, 0x7368F239 */
+const S6: f64 = 3.19475326584100867617e-05; /* 0x3F00BFEC, 0xDD17E945 */
+const R1: f64 = 1.39200533467621045958e+00; /* 0x3FF645A7, 0x62C4AB74 */
+const R2: f64 = 7.21935547567138069525e-01; /* 0x3FE71A18, 0x93D3DCDC */
+const R3: f64 = 1.71933865632803078993e-01; /* 0x3FC601ED, 0xCCFBDF27 */
+const R4: f64 = 1.86459191715652901344e-02; /* 0x3F9317EA, 0x742ED475 */
+const R5: f64 = 7.77942496381893596434e-04; /* 0x3F497DDA, 0xCA41A95B */
+const R6: f64 = 7.32668430744625636189e-06; /* 0x3EDEBAF7, 0xA5B38140 */
+const W0: f64 = 4.18938533204672725052e-01; /* 0x3FDACFE3, 0x90C97D69 */
+const W1: f64 = 8.33333333333329678849e-02; /* 0x3FB55555, 0x5555553B */
+const W2: f64 = -2.77777777728775536470e-03; /* 0xBF66C16C, 0x16B02E5C */
+const W3: f64 = 7.93650558643019558500e-04; /* 0x3F4A019F, 0x98CF38B6 */
+const W4: f64 = -5.95187557450339963135e-04; /* 0xBF4380CB, 0x8C0FE741 */
+const W5: f64 = 8.36339918996282139126e-04; /* 0x3F4B67BA, 0x4CDAD5D1 */
+const W6: f64 = -1.63092934096575273989e-03; /* 0xBF5AB89D, 0x0B9E43E4 */
+
+/* sin(PI*x) assuming x > 2^-100, if sin(PI*x)==0 the sign is arbitrary */
+fn sin_pi(mut x: f64) -> f64 {
+    let mut n: i32;
+
+    /* spurious inexact if odd int */
+    x = 2.0 * (x * 0.5 - floor(x * 0.5)); /* x mod 2.0 */
+
+    n = (x * 4.0) as i32;
+    n = (n + 1) / 2;
+    x -= (n as f64) * 0.5;
+    x *= PI;
+
+    match n {
+        1 => k_cos(x, 0.0),
+        2 => k_sin(-x, 0.0, 0),
+        3 => -k_cos(x, 0.0),
+        0 | _ => k_sin(x, 0.0, 0),
+    }
+}
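+
+/* Editor's note (illustrative, not part of the original source): e.g.
+ * sin_pi(0.25) reduces to n = 1, x = -pi/4 and returns k_cos(-pi/4, 0)
+ * = sin(pi/4) ~ 0.7071067811865476. */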
+
+pub fn lgamma_r(mut x: f64) -> (f64, i32) {
+    let u: u64 = x.to_bits();
+    let mut t: f64;
+    let y: f64;
+    let mut z: f64;
+    let nadj: f64;
+    let p: f64;
+    let p1: f64;
+    let p2: f64;
+    let p3: f64;
+    let q: f64;
+    let mut r: f64;
+    let w: f64;
+    let ix: u32;
+    let sign: bool;
+    let i: i32;
+    let mut signgam: i32;
+
+    /* purge off +-inf, NaN, +-0, tiny and negative arguments */
+    signgam = 1;
+    sign = (u >> 63) != 0;
+    ix = ((u >> 32) as u32) & 0x7fffffff;
+    if ix >= 0x7ff00000 {
+        return (x * x, signgam);
+    }
+    if ix < (0x3ff - 70) << 20 {
+        /* |x|<2**-70, return -log(|x|) */
+        if sign {
+            x = -x;
+            signgam = -1;
+        }
+        return (-log(x), signgam);
+    }
+    if sign {
+        x = -x;
+        t = sin_pi(x);
+        if t == 0.0 {
+            /* -integer */
+            return (1.0 / (x - x), signgam);
+        }
+        if t > 0.0 {
+            signgam = -1;
+        } else {
+            t = -t;
+        }
+        nadj = log(PI / (t * x));
+    } else {
+        nadj = 0.0;
+    }
+
+    /* purge off 1 and 2 */
+    if (ix == 0x3ff00000 || ix == 0x40000000) && (u & 0xffffffff) == 0 {
+        r = 0.0;
+    }
+    /* for x < 2.0 */
+    else if ix < 0x40000000 {
+        if ix <= 0x3feccccc {
+            /* lgamma(x) = lgamma(x+1)-log(x) */
+            r = -log(x);
+            if ix >= 0x3FE76944 {
+                y = 1.0 - x;
+                i = 0;
+            } else if ix >= 0x3FCDA661 {
+                y = x - (TC - 1.0);
+                i = 1;
+            } else {
+                y = x;
+                i = 2;
+            }
+        } else {
+            r = 0.0;
+            if ix >= 0x3FFBB4C3 {
+                /* [1.7316,2] */
+                y = 2.0 - x;
+                i = 0;
+            } else if ix >= 0x3FF3B4C4 {
+                /* [1.23,1.73] */
+                y = x - TC;
+                i = 1;
+            } else {
+                y = x - 1.0;
+                i = 2;
+            }
+        }
+        match i {
+            0 => {
+                z = y * y;
+                p1 = A0 + z * (A2 + z * (A4 + z * (A6 + z * (A8 + z * A10))));
+                p2 = z * (A1 + z * (A3 + z * (A5 + z * (A7 + z * (A9 + z * A11)))));
+                p = y * p1 + p2;
+                r += p - 0.5 * y;
+            }
+            1 => {
+                z = y * y;
+                w = z * y;
+                p1 = T0 + w * (T3 + w * (T6 + w * (T9 + w * T12))); /* parallel comp */
+                p2 = T1 + w * (T4 + w * (T7 + w * (T10 + w * T13)));
+                p3 = T2 + w * (T5 + w * (T8 + w * (T11 + w * T14)));
+                p = z * p1 - (TT - w * (p2 + y * p3));
+                r += TF + p;
+            }
+            2 => {
+                p1 = y * (U0 + y * (U1 + y * (U2 + y * (U3 + y * (U4 + y * U5)))));
+                p2 = 1.0 + y * (V1 + y * (V2 + y * (V3 + y * (V4 + y * V5))));
+                r += -0.5 * y + p1 / p2;
+            }
+            #[cfg(feature = "checked")]
+            _ => unreachable!(),
+            #[cfg(not(feature = "checked"))]
+            _ => {}
+        }
+    } else if ix < 0x40200000 {
+        /* x < 8.0 */
+        i = x as i32;
+        y = x - (i as f64);
+        p = y * (S0 + y * (S1 + y * (S2 + y * (S3 + y * (S4 + y * (S5 + y * S6))))));
+        q = 1.0 + y * (R1 + y * (R2 + y * (R3 + y * (R4 + y * (R5 + y * R6)))));
+        r = 0.5 * y + p / q;
+        z = 1.0; /* lgamma(1+s) = log(s) + lgamma(s) */
+        // TODO: In C, this was implemented using switch jumps with fallthrough.
+        // Does this implementation have performance problems?
+        if i >= 7 {
+            z *= y + 6.0;
+        }
+        if i >= 6 {
+            z *= y + 5.0;
+        }
+        if i >= 5 {
+            z *= y + 4.0;
+        }
+        if i >= 4 {
+            z *= y + 3.0;
+        }
+        if i >= 3 {
+            z *= y + 2.0;
+            r += log(z);
+        }
+    } else if ix < 0x43900000 {
+        /* 8.0 <= x < 2**58 */
+        t = log(x);
+        z = 1.0 / x;
+        y = z * z;
+        w = W0 + z * (W1 + y * (W2 + y * (W3 + y * (W4 + y * (W5 + y * W6)))));
+        r = (x - 0.5) * (t - 1.0) + w;
+    } else {
+        /* 2**58 <= x <= inf */
+        r = x * (log(x) - 1.0);
+    }
+    if sign {
+        r = nadj - r;
+    }
+    return (r, signgam);
+}
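+
+// A hedged usage sketch, not in the original source: lgamma_r returns the
+// pair (log|gamma(x)|, signgam), where signgam carries the sign of gamma(x).
+#[cfg(test)]
+mod lgamma_r_tests {
+    use super::lgamma_r;
+
+    #[test]
+    fn known_values() {
+        // gamma(1) = gamma(2) = 1, so lgamma is exactly 0 with positive sign.
+        assert_eq!(lgamma_r(1.0), (0.0, 1));
+        assert_eq!(lgamma_r(2.0), (0.0, 1));
+        // gamma(-0.5) = -2*sqrt(pi) < 0, so the reflection path sets signgam = -1.
+        assert_eq!(lgamma_r(-0.5).1, -1);
+    }
+}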
+
+
\ No newline at end of file diff --git a/src/libm/math/lgammaf.rs.html b/src/libm/math/lgammaf.rs.html new file mode 100644 index 000000000..26e4d785d --- /dev/null +++ b/src/libm/math/lgammaf.rs.html @@ -0,0 +1,13 @@ +lgammaf.rs.html -- source
+
+use super::lgammaf_r;
+
+pub fn lgammaf(x: f32) -> f32 {
+    lgammaf_r(x).0
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/lgammaf_r.rs.html b/src/libm/math/lgammaf_r.rs.html new file mode 100644 index 000000000..8baa188fc --- /dev/null +++ b/src/libm/math/lgammaf_r.rs.html @@ -0,0 +1,511 @@ +lgammaf_r.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/e_lgammaf_r.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::{floorf, k_cosf, k_sinf, logf};
+
+const PI: f32 = 3.1415927410e+00; /* 0x40490fdb */
+const A0: f32 = 7.7215664089e-02; /* 0x3d9e233f */
+const A1: f32 = 3.2246702909e-01; /* 0x3ea51a66 */
+const A2: f32 = 6.7352302372e-02; /* 0x3d89f001 */
+const A3: f32 = 2.0580807701e-02; /* 0x3ca89915 */
+const A4: f32 = 7.3855509982e-03; /* 0x3bf2027e */
+const A5: f32 = 2.8905137442e-03; /* 0x3b3d6ec6 */
+const A6: f32 = 1.1927076848e-03; /* 0x3a9c54a1 */
+const A7: f32 = 5.1006977446e-04; /* 0x3a05b634 */
+const A8: f32 = 2.2086278477e-04; /* 0x39679767 */
+const A9: f32 = 1.0801156895e-04; /* 0x38e28445 */
+const A10: f32 = 2.5214456400e-05; /* 0x37d383a2 */
+const A11: f32 = 4.4864096708e-05; /* 0x383c2c75 */
+const TC: f32 = 1.4616321325e+00; /* 0x3fbb16c3 */
+const TF: f32 = -1.2148628384e-01; /* 0xbdf8cdcd */
+/* TT = -(tail of TF) */
+const TT: f32 = 6.6971006518e-09; /* 0x31e61c52 */
+const T0: f32 = 4.8383611441e-01; /* 0x3ef7b95e */
+const T1: f32 = -1.4758771658e-01; /* 0xbe17213c */
+const T2: f32 = 6.4624942839e-02; /* 0x3d845a15 */
+const T3: f32 = -3.2788541168e-02; /* 0xbd064d47 */
+const T4: f32 = 1.7970675603e-02; /* 0x3c93373d */
+const T5: f32 = -1.0314224288e-02; /* 0xbc28fcfe */
+const T6: f32 = 6.1005386524e-03; /* 0x3bc7e707 */
+const T7: f32 = -3.6845202558e-03; /* 0xbb7177fe */
+const T8: f32 = 2.2596477065e-03; /* 0x3b141699 */
+const T9: f32 = -1.4034647029e-03; /* 0xbab7f476 */
+const T10: f32 = 8.8108185446e-04; /* 0x3a66f867 */
+const T11: f32 = -5.3859531181e-04; /* 0xba0d3085 */
+const T12: f32 = 3.1563205994e-04; /* 0x39a57b6b */
+const T13: f32 = -3.1275415677e-04; /* 0xb9a3f927 */
+const T14: f32 = 3.3552918467e-04; /* 0x39afe9f7 */
+const U0: f32 = -7.7215664089e-02; /* 0xbd9e233f */
+const U1: f32 = 6.3282704353e-01; /* 0x3f2200f4 */
+const U2: f32 = 1.4549225569e+00; /* 0x3fba3ae7 */
+const U3: f32 = 9.7771751881e-01; /* 0x3f7a4bb2 */
+const U4: f32 = 2.2896373272e-01; /* 0x3e6a7578 */
+const U5: f32 = 1.3381091878e-02; /* 0x3c5b3c5e */
+const V1: f32 = 2.4559779167e+00; /* 0x401d2ebe */
+const V2: f32 = 2.1284897327e+00; /* 0x4008392d */
+const V3: f32 = 7.6928514242e-01; /* 0x3f44efdf */
+const V4: f32 = 1.0422264785e-01; /* 0x3dd572af */
+const V5: f32 = 3.2170924824e-03; /* 0x3b52d5db */
+const S0: f32 = -7.7215664089e-02; /* 0xbd9e233f */
+const S1: f32 = 2.1498242021e-01; /* 0x3e5c245a */
+const S2: f32 = 3.2577878237e-01; /* 0x3ea6cc7a */
+const S3: f32 = 1.4635047317e-01; /* 0x3e15dce6 */
+const S4: f32 = 2.6642270386e-02; /* 0x3cda40e4 */
+const S5: f32 = 1.8402845599e-03; /* 0x3af135b4 */
+const S6: f32 = 3.1947532989e-05; /* 0x3805ff67 */
+const R1: f32 = 1.3920053244e+00; /* 0x3fb22d3b */
+const R2: f32 = 7.2193557024e-01; /* 0x3f38d0c5 */
+const R3: f32 = 1.7193385959e-01; /* 0x3e300f6e */
+const R4: f32 = 1.8645919859e-02; /* 0x3c98bf54 */
+const R5: f32 = 7.7794247773e-04; /* 0x3a4beed6 */
+const R6: f32 = 7.3266842264e-06; /* 0x36f5d7bd */
+const W0: f32 = 4.1893854737e-01; /* 0x3ed67f1d */
+const W1: f32 = 8.3333335817e-02; /* 0x3daaaaab */
+const W2: f32 = -2.7777778450e-03; /* 0xbb360b61 */
+const W3: f32 = 7.9365057172e-04; /* 0x3a500cfd */
+const W4: f32 = -5.9518753551e-04; /* 0xba1c065c */
+const W5: f32 = 8.3633989561e-04; /* 0x3a5b3dd2 */
+const W6: f32 = -1.6309292987e-03; /* 0xbad5c4e8 */
+
+/* sin(PI*x) assuming x > 2^-100; if sin(PI*x) == 0 the sign is arbitrary */
+fn sin_pi(mut x: f32) -> f32 {
+    let mut y: f64;
+    let mut n: isize;
+
+    /* spurious inexact if odd int */
+    x = 2.0 * (x * 0.5 - floorf(x * 0.5)); /* x mod 2.0 */
+
+    n = (x * 4.0) as isize;
+    n = (n + 1) / 2;
+    y = (x as f64) - (n as f64) * 0.5;
+    y *= 3.14159265358979323846;
+    match n {
+        1 => k_cosf(y),
+        2 => k_sinf(-y),
+        3 => -k_cosf(y),
+        0 | _ => k_sinf(y),
+    }
+}
+
+pub fn lgammaf_r(mut x: f32) -> (f32, i32) {
+    let u = x.to_bits();
+    let mut t: f32;
+    let y: f32;
+    let mut z: f32;
+    let nadj: f32;
+    let p: f32;
+    let p1: f32;
+    let p2: f32;
+    let p3: f32;
+    let q: f32;
+    let mut r: f32;
+    let w: f32;
+    let ix: u32;
+    let i: i32;
+    let sign: bool;
+    let mut signgam: i32;
+
+    /* purge off +-inf, NaN, +-0, tiny and negative arguments */
+    signgam = 1;
+    sign = (u >> 31) != 0;
+    ix = u & 0x7fffffff;
+    if ix >= 0x7f800000 {
+        return (x * x, signgam);
+    }
+    if ix < 0x35000000 {
+        /* |x| < 2**-21, return -log(|x|) */
+        if sign {
+            signgam = -1;
+            x = -x;
+        }
+        return (-logf(x), signgam);
+    }
+    if sign {
+        x = -x;
+        t = sin_pi(x);
+        if t == 0.0 {
+            /* -integer */
+            return (1.0 / (x - x), signgam);
+        }
+        if t > 0.0 {
+            signgam = -1;
+        } else {
+            t = -t;
+        }
+        nadj = logf(PI / (t * x));
+    } else {
+        nadj = 0.0;
+    }
+
+    /* purge off 1 and 2 */
+    if ix == 0x3f800000 || ix == 0x40000000 {
+        r = 0.0;
+    }
+    /* for x < 2.0 */
+    else if ix < 0x40000000 {
+        if ix <= 0x3f666666 {
+            /* lgamma(x) = lgamma(x+1)-log(x) */
+            r = -logf(x);
+            if ix >= 0x3f3b4a20 {
+                y = 1.0 - x;
+                i = 0;
+            } else if ix >= 0x3e6d3308 {
+                y = x - (TC - 1.0);
+                i = 1;
+            } else {
+                y = x;
+                i = 2;
+            }
+        } else {
+            r = 0.0;
+            if ix >= 0x3fdda618 {
+                /* [1.7316,2] */
+                y = 2.0 - x;
+                i = 0;
+            } else if ix >= 0x3f9da620 {
+                /* [1.23,1.73] */
+                y = x - TC;
+                i = 1;
+            } else {
+                y = x - 1.0;
+                i = 2;
+            }
+        }
+        match i {
+            0 => {
+                z = y * y;
+                p1 = A0 + z * (A2 + z * (A4 + z * (A6 + z * (A8 + z * A10))));
+                p2 = z * (A1 + z * (A3 + z * (A5 + z * (A7 + z * (A9 + z * A11)))));
+                p = y * p1 + p2;
+                r += p - 0.5 * y;
+            }
+            1 => {
+                z = y * y;
+                w = z * y;
+                p1 = T0 + w * (T3 + w * (T6 + w * (T9 + w * T12))); /* parallel comp */
+                p2 = T1 + w * (T4 + w * (T7 + w * (T10 + w * T13)));
+                p3 = T2 + w * (T5 + w * (T8 + w * (T11 + w * T14)));
+                p = z * p1 - (TT - w * (p2 + y * p3));
+                r += TF + p;
+            }
+            2 => {
+                p1 = y * (U0 + y * (U1 + y * (U2 + y * (U3 + y * (U4 + y * U5)))));
+                p2 = 1.0 + y * (V1 + y * (V2 + y * (V3 + y * (V4 + y * V5))));
+                r += -0.5 * y + p1 / p2;
+            }
+            #[cfg(feature = "checked")]
+            _ => unreachable!(),
+            #[cfg(not(feature = "checked"))]
+            _ => {}
+        }
+    } else if ix < 0x41000000 {
+        /* x < 8.0 */
+        i = x as i32;
+        y = x - (i as f32);
+        p = y * (S0 + y * (S1 + y * (S2 + y * (S3 + y * (S4 + y * (S5 + y * S6))))));
+        q = 1.0 + y * (R1 + y * (R2 + y * (R3 + y * (R4 + y * (R5 + y * R6)))));
+        r = 0.5 * y + p / q;
+        z = 1.0; /* lgamma(1+s) = log(s) + lgamma(s) */
+        // TODO: In C, this was implemented using switch jumps with fallthrough.
+        // Does this implementation have performance problems?
+        if i >= 7 {
+            z *= y + 6.0;
+        }
+        if i >= 6 {
+            z *= y + 5.0;
+        }
+        if i >= 5 {
+            z *= y + 4.0;
+        }
+        if i >= 4 {
+            z *= y + 3.0;
+        }
+        if i >= 3 {
+            z *= y + 2.0;
+            r += logf(z);
+        }
+    } else if ix < 0x5c800000 {
+        /* 8.0 <= x < 2**58 */
+        t = logf(x);
+        z = 1.0 / x;
+        y = z * z;
+        w = W0 + z * (W1 + y * (W2 + y * (W3 + y * (W4 + y * (W5 + y * W6)))));
+        r = (x - 0.5) * (t - 1.0) + w;
+    } else {
+        /* 2**58 <= x <= inf */
+        r = x * (logf(x) - 1.0);
+    }
+    if sign {
+        r = nadj - r;
+    }
+    return (r, signgam);
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/log.rs.html b/src/libm/math/log.rs.html new file mode 100644 index 000000000..55ff01e97 --- /dev/null +++ b/src/libm/math/log.rs.html @@ -0,0 +1,239 @@ +log.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/e_log.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/* log(x)
+ * Return the logarithm of x
+ *
+ * Method :
+ *   1. Argument Reduction: find k and f such that
+ *                      x = 2^k * (1+f),
+ *         where  sqrt(2)/2 < 1+f < sqrt(2) .
+ *
+ *   2. Approximation of log(1+f).
+ *      Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
+ *               = 2s + 2/3 s**3 + 2/5 s**5 + .....,
+ *               = 2s + s*R
+ *      We use a special Remez algorithm on [0,0.1716] to generate
+ *      a polynomial of degree 14 to approximate R The maximum error
+ *      of this polynomial approximation is bounded by 2**-58.45. In
+ *      other words,
+ *          R(z) ~ Lg1*s^2 + Lg2*s^4 + Lg3*s^6 + Lg4*s^8 + Lg5*s^10 + Lg6*s^12 + Lg7*s^14
+ *      (the values of Lg1 to Lg7 are listed in the program)
+ *      and
+ *          | Lg1*s^2 + ... + Lg7*s^14 - R(z) | <= 2^-58.45
+ *      Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
+ *      In order to guarantee error in log below 1ulp, we compute log
+ *      by
+ *              log(1+f) = f - s*(f - R)        (if f is not too large)
+ *              log(1+f) = f - (hfsq - s*(hfsq+R)).     (better accuracy)
+ *
+ *      3. Finally,  log(x) = k*ln2 + log(1+f).
+ *                          = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
+ *         Here ln2 is split into two floating point number:
+ *                      ln2_hi + ln2_lo,
+ *         where n*ln2_hi is always exact for |n| < 2000.
+ *
+ * Special cases:
+ *      log(x) is NaN with signal if x < 0 (including -INF) ;
+ *      log(+INF) is +INF; log(0) is -INF with signal;
+ *      log(NaN) is that NaN with no signal.
+ *
+ * Accuracy:
+ *      according to an error analysis, the error is always less than
+ *      1 ulp (unit in the last place).
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+const LN2_HI: f64 = 6.93147180369123816490e-01; /* 3fe62e42 fee00000 */
+const LN2_LO: f64 = 1.90821492927058770002e-10; /* 3dea39ef 35793c76 */
+const LG1: f64 = 6.666666666666735130e-01; /* 3FE55555 55555593 */
+const LG2: f64 = 3.999999999940941908e-01; /* 3FD99999 9997FA04 */
+const LG3: f64 = 2.857142874366239149e-01; /* 3FD24924 94229359 */
+const LG4: f64 = 2.222219843214978396e-01; /* 3FCC71C5 1D8E78AF */
+const LG5: f64 = 1.818357216161805012e-01; /* 3FC74664 96CB03DE */
+const LG6: f64 = 1.531383769920937332e-01; /* 3FC39A09 D078C69F */
+const LG7: f64 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn log(mut x: f64) -> f64 {
+    let x1p54 = f64::from_bits(0x4350000000000000); // 0x1p54 === 2 ^ 54
+
+    let mut ui = x.to_bits();
+    let mut hx: u32 = (ui >> 32) as u32;
+    let mut k: i32 = 0;
+
+    if (hx < 0x00100000) || ((hx >> 31) != 0) {
+        /* x < 2**-126  */
+        if ui << 1 == 0 {
+            return -1. / (x * x); /* log(+-0)=-inf */
+        }
+        if hx >> 31 != 0 {
+            return (x - x) / 0.0; /* log(-#) = NaN */
+        }
+        /* subnormal number, scale x up */
+        k -= 54;
+        x *= x1p54;
+        ui = x.to_bits();
+        hx = (ui >> 32) as u32;
+    } else if hx >= 0x7ff00000 {
+        return x;
+    } else if hx == 0x3ff00000 && ui << 32 == 0 {
+        return 0.;
+    }
+
+    /* reduce x into [sqrt(2)/2, sqrt(2)] */
+    hx += 0x3ff00000 - 0x3fe6a09e;
+    k += ((hx >> 20) as i32) - 0x3ff;
+    hx = (hx & 0x000fffff) + 0x3fe6a09e;
+    ui = ((hx as u64) << 32) | (ui & 0xffffffff);
+    x = f64::from_bits(ui);
+
+    let f: f64 = x - 1.0;
+    let hfsq: f64 = 0.5 * f * f;
+    let s: f64 = f / (2.0 + f);
+    let z: f64 = s * s;
+    let w: f64 = z * z;
+    let t1: f64 = w * (LG2 + w * (LG4 + w * LG6));
+    let t2: f64 = z * (LG1 + w * (LG3 + w * (LG5 + w * LG7)));
+    let r: f64 = t2 + t1;
+    let dk: f64 = k as f64;
+    s * (hfsq + r) + dk * LN2_LO - hfsq + f + dk * LN2_HI
+}
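+
+// A hedged sketch, not part of the original source: spot-checking the
+// reduction log(x) = k*ln2 + log(1+f) against values that are exact or
+// known to well under a ulp.
+#[cfg(test)]
+mod log_tests {
+    use super::log;
+
+    #[test]
+    fn known_values() {
+        assert_eq!(log(1.0), 0.0); // exact early return above
+        assert!((log(core::f64::consts::E) - 1.0).abs() < 1e-14);
+        // a power of two reduces to f = 0, leaving just k*ln2
+        assert!((log(8.0) - 3.0 * core::f64::consts::LN_2).abs() < 1e-14);
+    }
+}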
+
+
\ No newline at end of file diff --git a/src/libm/math/log10.rs.html b/src/libm/math/log10.rs.html new file mode 100644 index 000000000..a851394c6 --- /dev/null +++ b/src/libm/math/log10.rs.html @@ -0,0 +1,239 @@ +log10.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/e_log10.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/*
+ * Return the base 10 logarithm of x.  See log.c for most comments.
+ *
+ * Reduce x to 2^k (1+f) and calculate r = log(1+f) - f + f*f/2
+ * as in log.c, then combine and scale in extra precision:
+ *    log10(x) = (f - f*f/2 + r)/log(10) + k*log10(2)
+ */
+
+use core::f64;
+
+const IVLN10HI: f64 = 4.34294481878168880939e-01; /* 0x3fdbcb7b, 0x15200000 */
+const IVLN10LO: f64 = 2.50829467116452752298e-11; /* 0x3dbb9438, 0xca9aadd5 */
+const LOG10_2HI: f64 = 3.01029995663611771306e-01; /* 0x3FD34413, 0x509F6000 */
+const LOG10_2LO: f64 = 3.69423907715893078616e-13; /* 0x3D59FEF3, 0x11F12B36 */
+const LG1: f64 = 6.666666666666735130e-01; /* 3FE55555 55555593 */
+const LG2: f64 = 3.999999999940941908e-01; /* 3FD99999 9997FA04 */
+const LG3: f64 = 2.857142874366239149e-01; /* 3FD24924 94229359 */
+const LG4: f64 = 2.222219843214978396e-01; /* 3FCC71C5 1D8E78AF */
+const LG5: f64 = 1.818357216161805012e-01; /* 3FC74664 96CB03DE */
+const LG6: f64 = 1.531383769920937332e-01; /* 3FC39A09 D078C69F */
+const LG7: f64 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn log10(mut x: f64) -> f64 {
+    let x1p54 = f64::from_bits(0x4350000000000000); // 0x1p54 === 2 ^ 54
+
+    let mut ui: u64 = x.to_bits();
+    let hfsq: f64;
+    let f: f64;
+    let s: f64;
+    let z: f64;
+    let r: f64;
+    let mut w: f64;
+    let t1: f64;
+    let t2: f64;
+    let dk: f64;
+    let y: f64;
+    let mut hi: f64;
+    let lo: f64;
+    let mut val_hi: f64;
+    let mut val_lo: f64;
+    let mut hx: u32;
+    let mut k: i32;
+
+    hx = (ui >> 32) as u32;
+    k = 0;
+    if hx < 0x00100000 || (hx >> 31) > 0 {
+        if ui << 1 == 0 {
+            return -1. / (x * x); /* log(+-0)=-inf */
+        }
+        if (hx >> 31) > 0 {
+            return (x - x) / 0.0; /* log(-#) = NaN */
+        }
+        /* subnormal number, scale x up */
+        k -= 54;
+        x *= x1p54;
+        ui = x.to_bits();
+        hx = (ui >> 32) as u32;
+    } else if hx >= 0x7ff00000 {
+        return x;
+    } else if hx == 0x3ff00000 && ui << 32 == 0 {
+        return 0.;
+    }
+
+    /* reduce x into [sqrt(2)/2, sqrt(2)] */
+    hx += 0x3ff00000 - 0x3fe6a09e;
+    k += (hx >> 20) as i32 - 0x3ff;
+    hx = (hx & 0x000fffff) + 0x3fe6a09e;
+    ui = (hx as u64) << 32 | (ui & 0xffffffff);
+    x = f64::from_bits(ui);
+
+    f = x - 1.0;
+    hfsq = 0.5 * f * f;
+    s = f / (2.0 + f);
+    z = s * s;
+    w = z * z;
+    t1 = w * (LG2 + w * (LG4 + w * LG6));
+    t2 = z * (LG1 + w * (LG3 + w * (LG5 + w * LG7)));
+    r = t2 + t1;
+
+    /* See log2.c for details. */
+    /* hi+lo = f - hfsq + s*(hfsq+R) ~ log(1+f) */
+    hi = f - hfsq;
+    ui = hi.to_bits();
+    ui &= (-1i64 as u64) << 32;
+    hi = f64::from_bits(ui);
+    lo = f - hi - hfsq + s * (hfsq + r);
+
+    /* val_hi+val_lo ~ log10(1+f) + k*log10(2) */
+    val_hi = hi * IVLN10HI;
+    dk = k as f64;
+    y = dk * LOG10_2HI;
+    val_lo = dk * LOG10_2LO + (lo + hi) * IVLN10LO + lo * IVLN10HI;
+
+    /*
+     * Extra precision for adding y is not strictly needed
+     * since there is no very large cancellation near x = sqrt(2) or
+     * x = 1/sqrt(2), but we do it anyway since it costs little on CPUs
+     * with some parallelism and it reduces the error for many args.
+     */
+    w = y + val_hi;
+    val_lo += (y - w) + val_hi;
+    val_hi = w;
+
+    val_lo + val_hi
+}
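+
+// A hedged sketch, not in the original source: exact decades make a cheap
+// consistency check of the hi/lo combination above.
+#[cfg(test)]
+mod log10_tests {
+    use super::log10;
+
+    #[test]
+    fn decades() {
+        assert_eq!(log10(1.0), 0.0);
+        assert!((log10(10.0) - 1.0).abs() < 1e-14);
+        assert!((log10(1000.0) - 3.0).abs() < 1e-14);
+    }
+}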
+
+
\ No newline at end of file diff --git a/src/libm/math/log10f.rs.html b/src/libm/math/log10f.rs.html new file mode 100644 index 000000000..21f7d2279 --- /dev/null +++ b/src/libm/math/log10f.rs.html @@ -0,0 +1,187 @@ +log10f.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/e_log10f.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/*
+ * See comments in log10.c.
+ */
+
+use core::f32;
+
+const IVLN10HI: f32 = 4.3432617188e-01; /* 0x3ede6000 */
+const IVLN10LO: f32 = -3.1689971365e-05; /* 0xb804ead9 */
+const LOG10_2HI: f32 = 3.0102920532e-01; /* 0x3e9a2080 */
+const LOG10_2LO: f32 = 7.9034151668e-07; /* 0x355427db */
+/* |(log(1+s)-log(1-s))/s - Lg(s)| < 2**-34.24 (~[-4.95e-11, 4.97e-11]). */
+const LG1: f32 = 0.66666662693; /* 0xaaaaaa.0p-24 */
+const LG2: f32 = 0.40000972152; /* 0xccce13.0p-25 */
+const LG3: f32 = 0.28498786688; /* 0x91e9ee.0p-25 */
+const LG4: f32 = 0.24279078841; /* 0xf89e26.0p-26 */
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn log10f(mut x: f32) -> f32 {
+    let x1p25f = f32::from_bits(0x4c000000); // 0x1p25f === 2 ^ 25
+
+    let mut ui: u32 = x.to_bits();
+    let hfsq: f32;
+    let f: f32;
+    let s: f32;
+    let z: f32;
+    let r: f32;
+    let w: f32;
+    let t1: f32;
+    let t2: f32;
+    let dk: f32;
+    let mut hi: f32;
+    let lo: f32;
+    let mut ix: u32;
+    let mut k: i32;
+
+    ix = ui;
+    k = 0;
+    if ix < 0x00800000 || (ix >> 31) > 0 {
+        /* x < 2**-126  */
+        if ix << 1 == 0 {
+            return -1. / (x * x); /* log(+-0)=-inf */
+        }
+        if (ix >> 31) > 0 {
+            return (x - x) / 0.0; /* log(-#) = NaN */
+        }
+        /* subnormal number, scale up x */
+        k -= 25;
+        x *= x1p25f;
+        ui = x.to_bits();
+        ix = ui;
+    } else if ix >= 0x7f800000 {
+        return x;
+    } else if ix == 0x3f800000 {
+        return 0.;
+    }
+
+    /* reduce x into [sqrt(2)/2, sqrt(2)] */
+    ix += 0x3f800000 - 0x3f3504f3;
+    k += (ix >> 23) as i32 - 0x7f;
+    ix = (ix & 0x007fffff) + 0x3f3504f3;
+    ui = ix;
+    x = f32::from_bits(ui);
+
+    f = x - 1.0;
+    s = f / (2.0 + f);
+    z = s * s;
+    w = z * z;
+    t1 = w * (LG2 + w * LG4);
+    t2 = z * (LG1 + w * LG3);
+    r = t2 + t1;
+    hfsq = 0.5 * f * f;
+
+    hi = f - hfsq;
+    ui = hi.to_bits();
+    ui &= 0xfffff000;
+    hi = f32::from_bits(ui);
+    lo = f - hi - hfsq + s * (hfsq + r);
+    dk = k as f32;
+    dk * LOG10_2LO + (lo + hi) * IVLN10LO + lo * IVLN10HI + hi * IVLN10HI + dk * LOG10_2HI
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/log1p.rs.html b/src/libm/math/log1p.rs.html new file mode 100644 index 000000000..bbc40245f --- /dev/null +++ b/src/libm/math/log1p.rs.html @@ -0,0 +1,291 @@ +log1p.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/s_log1p.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/* double log1p(double x)
+ * Return the natural logarithm of 1+x.
+ *
+ * Method :
+ *   1. Argument Reduction: find k and f such that
+ *                      1+x = 2^k * (1+f),
+ *         where  sqrt(2)/2 < 1+f < sqrt(2) .
+ *
+ *      Note. If k=0, then f=x is exact. However, if k!=0, then f
+ *      may not be representable exactly. In that case, a correction
+ *      term is need. Let u=1+x rounded. Let c = (1+x)-u, then
+ *      log(1+x) - log(u) ~ c/u. Thus, we proceed to compute log(u),
+ *      and add back the correction term c/u.
+ *      (Note: when x > 2**53, one can simply return log(x))
+ *
+ *   2. Approximation of log(1+f): See log.c
+ *
+ *   3. Finally, log1p(x) = k*ln2 + log(1+f) + c/u. See log.c
+ *
+ * Special cases:
+ *      log1p(x) is NaN with signal if x < -1 (including -INF) ;
+ *      log1p(+INF) is +INF; log1p(-1) is -INF with signal;
+ *      log1p(NaN) is that NaN with no signal.
+ *
+ * Accuracy:
+ *      according to an error analysis, the error is always less than
+ *      1 ulp (unit in the last place).
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ *
+ * Note: Assuming log() returns an accurate answer, the following
+ *       algorithm can be used to compute log1p(x) to within a few ULP:
+ *
+ *              u = 1+x;
+ *              if(u==1.0) return x ; else
+ *                         return log(u)*(x/(u-1.0));
+ *
+ *       See HP-15C Advanced Functions Handbook, p.193.
+ */
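+
+// A hedged sketch, not part of the original source, of the HP-15C algorithm
+// quoted above: given an accurate log(), log1p can be recovered to within a
+// few ulp by rescaling with the exact x over the rounded increment u - 1.
+#[cfg(test)]
+mod hp15c_sketch {
+    use super::log1p;
+    use super::super::log;
+
+    fn log1p_via_log(x: f64) -> f64 {
+        let u = 1.0 + x;
+        if u == 1.0 {
+            x // 1+x rounded all the way back to 1: log1p(x) ~ x
+        } else {
+            log(u) * (x / (u - 1.0)) // correct for the rounding in u = 1+x
+        }
+    }
+
+    #[test]
+    fn agrees_with_log1p() {
+        for &x in &[1e-20, 1e-3, 0.5, 3.0] {
+            let (a, b) = (log1p_via_log(x), log1p(x));
+            assert!((a - b).abs() <= 1e-14 * (1.0 + b.abs()));
+        }
+    }
+}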
+
+use core::f64;
+
+const LN2_HI: f64 = 6.93147180369123816490e-01; /* 3fe62e42 fee00000 */
+const LN2_LO: f64 = 1.90821492927058770002e-10; /* 3dea39ef 35793c76 */
+const LG1: f64 = 6.666666666666735130e-01; /* 3FE55555 55555593 */
+const LG2: f64 = 3.999999999940941908e-01; /* 3FD99999 9997FA04 */
+const LG3: f64 = 2.857142874366239149e-01; /* 3FD24924 94229359 */
+const LG4: f64 = 2.222219843214978396e-01; /* 3FCC71C5 1D8E78AF */
+const LG5: f64 = 1.818357216161805012e-01; /* 3FC74664 96CB03DE */
+const LG6: f64 = 1.531383769920937332e-01; /* 3FC39A09 D078C69F */
+const LG7: f64 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn log1p(x: f64) -> f64 {
+    let mut ui: u64 = x.to_bits();
+    let hfsq: f64;
+    let mut f: f64 = 0.;
+    let mut c: f64 = 0.;
+    let s: f64;
+    let z: f64;
+    let r: f64;
+    let w: f64;
+    let t1: f64;
+    let t2: f64;
+    let dk: f64;
+    let hx: u32;
+    let mut hu: u32;
+    let mut k: i32;
+
+    hx = (ui >> 32) as u32;
+    k = 1;
+    if hx < 0x3fda827a || (hx >> 31) > 0 {
+        /* 1+x < sqrt(2)+ */
+        if hx >= 0xbff00000 {
+            /* x <= -1.0 */
+            if x == -1. {
+                return x / 0.0; /* log1p(-1) = -inf */
+            }
+            return (x - x) / 0.0; /* log1p(x<-1) = NaN */
+        }
+        if hx << 1 < 0x3ca00000 << 1 {
+            /* |x| < 2**-53 */
+            /* underflow if subnormal */
+            if (hx & 0x7ff00000) == 0 {
+                force_eval!(x as f32);
+            }
+            return x;
+        }
+        if hx <= 0xbfd2bec4 {
+            /* sqrt(2)/2- <= 1+x < sqrt(2)+ */
+            k = 0;
+            c = 0.;
+            f = x;
+        }
+    } else if hx >= 0x7ff00000 {
+        return x;
+    }
+    if k > 0 {
+        ui = (1. + x).to_bits();
+        hu = (ui >> 32) as u32;
+        hu += 0x3ff00000 - 0x3fe6a09e;
+        k = (hu >> 20) as i32 - 0x3ff;
+        /* correction term ~ log(1+x)-log(u), avoid underflow in c/u */
+        if k < 54 {
+            c = if k >= 2 {
+                1. - (f64::from_bits(ui) - x)
+            } else {
+                x - (f64::from_bits(ui) - 1.)
+            };
+            c /= f64::from_bits(ui);
+        } else {
+            c = 0.;
+        }
+        /* reduce u into [sqrt(2)/2, sqrt(2)] */
+        hu = (hu & 0x000fffff) + 0x3fe6a09e;
+        ui = (hu as u64) << 32 | (ui & 0xffffffff);
+        f = f64::from_bits(ui) - 1.;
+    }
+    hfsq = 0.5 * f * f;
+    s = f / (2.0 + f);
+    z = s * s;
+    w = z * z;
+    t1 = w * (LG2 + w * (LG4 + w * LG6));
+    t2 = z * (LG1 + w * (LG3 + w * (LG5 + w * LG7)));
+    r = t2 + t1;
+    dk = k as f64;
+    s * (hfsq + r) + (dk * LN2_LO + c) - hfsq + f + dk * LN2_HI
+}
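+
+// A hedged note, not in the original source: the |x| < 2**-53 early return
+// above is what makes log1p usable where log(1+x) is not, since 1+x would
+// round to 1 and discard x entirely.
+#[cfg(test)]
+mod tiny_x {
+    #[test]
+    fn tiny_x_is_exact() {
+        let x = 1e-300;
+        assert_eq!(super::log1p(x), x);
+    }
+}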
+
+
\ No newline at end of file diff --git a/src/libm/math/log1pf.rs.html b/src/libm/math/log1pf.rs.html new file mode 100644 index 000000000..58befba79 --- /dev/null +++ b/src/libm/math/log1pf.rs.html @@ -0,0 +1,201 @@ +log1pf.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/s_log1pf.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use core::f32;
+
+const LN2_HI: f32 = 6.9313812256e-01; /* 0x3f317180 */
+const LN2_LO: f32 = 9.0580006145e-06; /* 0x3717f7d1 */
+/* |(log(1+s)-log(1-s))/s - Lg(s)| < 2**-34.24 (~[-4.95e-11, 4.97e-11]). */
+const LG1: f32 = 0.66666662693; /* 0xaaaaaa.0p-24 */
+const LG2: f32 = 0.40000972152; /* 0xccce13.0p-25 */
+const LG3: f32 = 0.28498786688; /* 0x91e9ee.0p-25 */
+const LG4: f32 = 0.24279078841; /* 0xf89e26.0p-26 */
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn log1pf(x: f32) -> f32 {
+    let mut ui: u32 = x.to_bits();
+    let hfsq: f32;
+    let mut f: f32 = 0.;
+    let mut c: f32 = 0.;
+    let s: f32;
+    let z: f32;
+    let r: f32;
+    let w: f32;
+    let t1: f32;
+    let t2: f32;
+    let dk: f32;
+    let ix: u32;
+    let mut iu: u32;
+    let mut k: i32;
+
+    ix = ui;
+    k = 1;
+    if ix < 0x3ed413d0 || (ix >> 31) > 0 {
+        /* 1+x < sqrt(2)+  */
+        if ix >= 0xbf800000 {
+            /* x <= -1.0 */
+            if x == -1. {
+                return x / 0.0; /* log1p(-1) = -inf */
+            }
+            return (x - x) / 0.0; /* log1p(x<-1)=NaN */
+        }
+        if ix << 1 < 0x33800000 << 1 {
+            /* |x| < 2**-24 */
+            /* underflow if subnormal */
+            if (ix & 0x7f800000) == 0 {
+                force_eval!(x * x);
+            }
+            return x;
+        }
+        if ix <= 0xbe95f619 {
+            /* sqrt(2)/2- <= 1+x < sqrt(2)+ */
+            k = 0;
+            c = 0.;
+            f = x;
+        }
+    } else if ix >= 0x7f800000 {
+        return x;
+    }
+    if k > 0 {
+        ui = (1. + x).to_bits();
+        iu = ui;
+        iu += 0x3f800000 - 0x3f3504f3;
+        k = (iu >> 23) as i32 - 0x7f;
+        /* correction term ~ log(1+x)-log(u), avoid underflow in c/u */
+        if k < 25 {
+            c = if k >= 2 {
+                1. - (f32::from_bits(ui) - x)
+            } else {
+                x - (f32::from_bits(ui) - 1.)
+            };
+            c /= f32::from_bits(ui);
+        } else {
+            c = 0.;
+        }
+        /* reduce u into [sqrt(2)/2, sqrt(2)] */
+        iu = (iu & 0x007fffff) + 0x3f3504f3;
+        ui = iu;
+        f = f32::from_bits(ui) - 1.;
+    }
+    s = f / (2.0 + f);
+    z = s * s;
+    w = z * z;
+    t1 = w * (LG2 + w * LG4);
+    t2 = z * (LG1 + w * LG3);
+    r = t2 + t1;
+    hfsq = 0.5 * f * f;
+    dk = k as f32;
+    s * (hfsq + r) + (dk * LN2_LO + c) - hfsq + f + dk * LN2_HI
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/log2.rs.html b/src/libm/math/log2.rs.html new file mode 100644 index 000000000..852460f8c --- /dev/null +++ b/src/libm/math/log2.rs.html @@ -0,0 +1,217 @@ +log2.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/e_log2.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/*
+ * Return the base 2 logarithm of x.  See log.c for most comments.
+ *
+ * Reduce x to 2^k (1+f) and calculate r = log(1+f) - f + f*f/2
+ * as in log.c, then combine and scale in extra precision:
+ *    log2(x) = (f - f*f/2 + r)/log(2) + k
+ */
+
+use core::f64;
+
+const IVLN2HI: f64 = 1.44269504072144627571e+00; /* 0x3ff71547, 0x65200000 */
+const IVLN2LO: f64 = 1.67517131648865118353e-10; /* 0x3de705fc, 0x2eefa200 */
+const LG1: f64 = 6.666666666666735130e-01; /* 3FE55555 55555593 */
+const LG2: f64 = 3.999999999940941908e-01; /* 3FD99999 9997FA04 */
+const LG3: f64 = 2.857142874366239149e-01; /* 3FD24924 94229359 */
+const LG4: f64 = 2.222219843214978396e-01; /* 3FCC71C5 1D8E78AF */
+const LG5: f64 = 1.818357216161805012e-01; /* 3FC74664 96CB03DE */
+const LG6: f64 = 1.531383769920937332e-01; /* 3FC39A09 D078C69F */
+const LG7: f64 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn log2(mut x: f64) -> f64 {
+    let x1p54 = f64::from_bits(0x4350000000000000); // 0x1p54 === 2 ^ 54
+
+    let mut ui: u64 = x.to_bits();
+    let hfsq: f64;
+    let f: f64;
+    let s: f64;
+    let z: f64;
+    let r: f64;
+    let mut w: f64;
+    let t1: f64;
+    let t2: f64;
+    let y: f64;
+    let mut hi: f64;
+    let lo: f64;
+    let mut val_hi: f64;
+    let mut val_lo: f64;
+    let mut hx: u32;
+    let mut k: i32;
+
+    hx = (ui >> 32) as u32;
+    k = 0;
+    if hx < 0x00100000 || (hx >> 31) > 0 {
+        if ui << 1 == 0 {
+            return -1. / (x * x); /* log(+-0)=-inf */
+        }
+        if (hx >> 31) > 0 {
+            return (x - x) / 0.0; /* log(-#) = NaN */
+        }
+        /* subnormal number, scale x up */
+        k -= 54;
+        x *= x1p54;
+        ui = x.to_bits();
+        hx = (ui >> 32) as u32;
+    } else if hx >= 0x7ff00000 {
+        return x;
+    } else if hx == 0x3ff00000 && ui << 32 == 0 {
+        return 0.;
+    }
+
+    /* reduce x into [sqrt(2)/2, sqrt(2)] */
+    hx += 0x3ff00000 - 0x3fe6a09e;
+    k += (hx >> 20) as i32 - 0x3ff;
+    hx = (hx & 0x000fffff) + 0x3fe6a09e;
+    ui = (hx as u64) << 32 | (ui & 0xffffffff);
+    x = f64::from_bits(ui);
+
+    f = x - 1.0;
+    hfsq = 0.5 * f * f;
+    s = f / (2.0 + f);
+    z = s * s;
+    w = z * z;
+    t1 = w * (LG2 + w * (LG4 + w * LG6));
+    t2 = z * (LG1 + w * (LG3 + w * (LG5 + w * LG7)));
+    r = t2 + t1;
+
+    /* hi+lo = f - hfsq + s*(hfsq+R) ~ log(1+f) */
+    hi = f - hfsq;
+    ui = hi.to_bits();
+    ui &= (-1i64 as u64) << 32;
+    hi = f64::from_bits(ui);
+    lo = f - hi - hfsq + s * (hfsq + r);
+
+    val_hi = hi * IVLN2HI;
+    val_lo = (lo + hi) * IVLN2LO + lo * IVLN2HI;
+
+    /* spadd(val_hi, val_lo, y), except for not using double_t: */
+    y = k.into();
+    w = y + val_hi;
+    val_lo += (y - w) + val_hi;
+    val_hi = w;
+
+    val_lo + val_hi
+}
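+
+// A hedged sketch, not part of the original source: an exact power of two
+// reduces to f = 0 above, so the result collapses to the integer k.
+#[cfg(test)]
+mod log2_tests {
+    use super::log2;
+
+    #[test]
+    fn powers_of_two() {
+        assert_eq!(log2(1.0), 0.0);
+        assert!((log2(8.0) - 3.0).abs() < 1e-15);
+        assert!((log2(0.25) + 2.0).abs() < 1e-15);
+    }
+}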
+
+
\ No newline at end of file diff --git a/src/libm/math/log2f.rs.html b/src/libm/math/log2f.rs.html new file mode 100644 index 000000000..7d8ca8715 --- /dev/null +++ b/src/libm/math/log2f.rs.html @@ -0,0 +1,179 @@ +log2f.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/e_log2f.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/*
+ * See comments in log2.c.
+ */
+
+use core::f32;
+
+const IVLN2HI: f32 = 1.4428710938e+00; /* 0x3fb8b000 */
+const IVLN2LO: f32 = -1.7605285393e-04; /* 0xb9389ad4 */
+/* |(log(1+s)-log(1-s))/s - Lg(s)| < 2**-34.24 (~[-4.95e-11, 4.97e-11]). */
+const LG1: f32 = 0.66666662693; /* 0xaaaaaa.0p-24 */
+const LG2: f32 = 0.40000972152; /* 0xccce13.0p-25 */
+const LG3: f32 = 0.28498786688; /* 0x91e9ee.0p-25 */
+const LG4: f32 = 0.24279078841; /* 0xf89e26.0p-26 */
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn log2f(mut x: f32) -> f32 {
+    let x1p25f = f32::from_bits(0x4c000000); // 0x1p25f === 2 ^ 25
+
+    let mut ui: u32 = x.to_bits();
+    let hfsq: f32;
+    let f: f32;
+    let s: f32;
+    let z: f32;
+    let r: f32;
+    let w: f32;
+    let t1: f32;
+    let t2: f32;
+    let mut hi: f32;
+    let lo: f32;
+    let mut ix: u32;
+    let mut k: i32;
+
+    ix = ui;
+    k = 0;
+    if ix < 0x00800000 || (ix >> 31) > 0 {
+        /* x < 2**-126  */
+        if ix << 1 == 0 {
+            return -1. / (x * x); /* log(+-0)=-inf */
+        }
+        if (ix >> 31) > 0 {
+            return (x - x) / 0.0; /* log(-#) = NaN */
+        }
+        /* subnormal number, scale up x */
+        k -= 25;
+        x *= x1p25f;
+        ui = x.to_bits();
+        ix = ui;
+    } else if ix >= 0x7f800000 {
+        return x;
+    } else if ix == 0x3f800000 {
+        return 0.;
+    }
+
+    /* reduce x into [sqrt(2)/2, sqrt(2)] */
+    ix += 0x3f800000 - 0x3f3504f3;
+    k += (ix >> 23) as i32 - 0x7f;
+    ix = (ix & 0x007fffff) + 0x3f3504f3;
+    ui = ix;
+    x = f32::from_bits(ui);
+
+    f = x - 1.0;
+    s = f / (2.0 + f);
+    z = s * s;
+    w = z * z;
+    t1 = w * (LG2 + w * LG4);
+    t2 = z * (LG1 + w * LG3);
+    r = t2 + t1;
+    hfsq = 0.5 * f * f;
+
+    hi = f - hfsq;
+    ui = hi.to_bits();
+    ui &= 0xfffff000;
+    hi = f32::from_bits(ui);
+    lo = f - hi - hfsq + s * (hfsq + r);
+    (lo + hi) * IVLN2LO + lo * IVLN2HI + hi * IVLN2HI + k as f32
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/logf.rs.html b/src/libm/math/logf.rs.html new file mode 100644 index 000000000..51f2203d9 --- /dev/null +++ b/src/libm/math/logf.rs.html @@ -0,0 +1,135 @@ +logf.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/e_logf.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+const LN2_HI: f32 = 6.9313812256e-01; /* 0x3f317180 */
+const LN2_LO: f32 = 9.0580006145e-06; /* 0x3717f7d1 */
+/* |(log(1+s)-log(1-s))/s - Lg(s)| < 2**-34.24 (~[-4.95e-11, 4.97e-11]). */
+const LG1: f32 = 0.66666662693; /*  0xaaaaaa.0p-24*/
+const LG2: f32 = 0.40000972152; /*  0xccce13.0p-25 */
+const LG3: f32 = 0.28498786688; /*  0x91e9ee.0p-25 */
+const LG4: f32 = 0.24279078841; /*  0xf89e26.0p-26 */
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn logf(mut x: f32) -> f32 {
+    let x1p25 = f32::from_bits(0x4c000000); // 0x1p25f === 2 ^ 25
+
+    let mut ix = x.to_bits();
+    let mut k = 0i32;
+
+    if (ix < 0x00800000) || ((ix >> 31) != 0) {
+        /* x < 2**-126  */
+        if ix << 1 == 0 {
+            return -1. / (x * x); /* log(+-0)=-inf */
+        }
+        if (ix >> 31) != 0 {
+            return (x - x) / 0.; /* log(-#) = NaN */
+        }
+        /* subnormal number, scale up x */
+        k -= 25;
+        x *= x1p25;
+        ix = x.to_bits();
+    } else if ix >= 0x7f800000 {
+        return x;
+    } else if ix == 0x3f800000 {
+        return 0.;
+    }
+
+    /* reduce x into [sqrt(2)/2, sqrt(2)] */
+    ix += 0x3f800000 - 0x3f3504f3;
+    k += ((ix >> 23) as i32) - 0x7f;
+    ix = (ix & 0x007fffff) + 0x3f3504f3;
+    x = f32::from_bits(ix);
+
+    let f = x - 1.;
+    let s = f / (2. + f);
+    let z = s * s;
+    let w = z * z;
+    let t1 = w * (LG2 + w * LG4);
+    let t2 = z * (LG1 + w * LG3);
+    let r = t2 + t1;
+    let hfsq = 0.5 * f * f;
+    let dk = k as f32;
+    s * (hfsq + r) + dk * LN2_LO - hfsq + f + dk * LN2_HI
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/mod.rs.html b/src/libm/math/mod.rs.html new file mode 100644 index 000000000..45708dd89 --- /dev/null +++ b/src/libm/math/mod.rs.html @@ -0,0 +1,687 @@ +mod.rs.html -- source
+
+macro_rules! force_eval {
+    ($e:expr) => {
+        unsafe {
+            ::core::ptr::read_volatile(&$e);
+        }
+    };
+}
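+
+// Note (added, hedged): the volatile read above keeps the compiler from
+// optimizing away an expression evaluated only for its floating-point side
+// effects, e.g. raising the inexact/underflow flags on subnormal inputs as
+// in the log1p/log1pf paths.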
+
+#[cfg(not(feature = "checked"))]
+macro_rules! i {
+    ($array:expr, $index:expr) => {
+        unsafe { *$array.get_unchecked($index) }
+    };
+    ($array:expr, $index:expr, = , $rhs:expr) => {
+        unsafe {
+            *$array.get_unchecked_mut($index) = $rhs;
+        }
+    };
+    ($array:expr, $index:expr, += , $rhs:expr) => {
+        unsafe {
+            *$array.get_unchecked_mut($index) += $rhs;
+        }
+    };
+    ($array:expr, $index:expr, -= , $rhs:expr) => {
+        unsafe {
+            *$array.get_unchecked_mut($index) -= $rhs;
+        }
+    };
+    ($array:expr, $index:expr, &= , $rhs:expr) => {
+        unsafe {
+            *$array.get_unchecked_mut($index) &= $rhs;
+        }
+    };
+    ($array:expr, $index:expr, == , $rhs:expr) => {
+        unsafe { *$array.get_unchecked_mut($index) == $rhs }
+    };
+}
+
+#[cfg(feature = "checked")]
+macro_rules! i {
+    ($array:expr, $index:expr) => {
+        *$array.get($index).unwrap()
+    };
+    ($array:expr, $index:expr, = , $rhs:expr) => {
+        *$array.get_mut($index).unwrap() = $rhs;
+    };
+    ($array:expr, $index:expr, -= , $rhs:expr) => {
+        *$array.get_mut($index).unwrap() -= $rhs;
+    };
+    ($array:expr, $index:expr, += , $rhs:expr) => {
+        *$array.get_mut($index).unwrap() += $rhs;
+    };
+    ($array:expr, $index:expr, &= , $rhs:expr) => {
+        *$array.get_mut($index).unwrap() &= $rhs;
+    };
+    ($array:expr, $index:expr, == , $rhs:expr) => {
+        *$array.get_mut($index).unwrap() == $rhs
+    };
+}
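+
+// A hedged usage sketch, not part of the original source: both definitions
+// of i! expose the same call syntax, so the "checked" feature can swap
+// unchecked element access for panicking bounds checks without touching
+// call sites.
+#[cfg(test)]
+mod index_macro_tests {
+    #[test]
+    fn read_and_write_agree() {
+        let mut buf = [0u32; 4];
+        i!(buf, 0, =, 7); // buf[0] = 7, checked or unchecked
+        i!(buf, 0, +=, 1); // buf[0] += 1
+        assert_eq!(i!(buf, 0), 8);
+    }
+}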
+
+macro_rules! llvm_intrinsically_optimized {
+    (#[cfg($($clause:tt)*)] $e:expr) => {
+        #[cfg(all(not(feature = "stable"), $($clause)*))]
+        {
+            if true { // thwart the dead code lint
+                $e
+            }
+        }
+    };
+}
+
+// Public modules
+mod acos;
+mod acosf;
+mod acosh;
+mod acoshf;
+mod asin;
+mod asinf;
+mod asinh;
+mod asinhf;
+mod atan;
+mod atan2;
+mod atan2f;
+mod atanf;
+mod atanh;
+mod atanhf;
+mod cbrt;
+mod cbrtf;
+mod ceil;
+mod ceilf;
+mod copysign;
+mod copysignf;
+mod cos;
+mod cosf;
+mod cosh;
+mod coshf;
+mod erf;
+mod erff;
+mod exp;
+mod exp10;
+mod exp10f;
+mod exp2;
+mod exp2f;
+mod expf;
+mod expm1;
+mod expm1f;
+mod fabs;
+mod fabsf;
+mod fdim;
+mod fdimf;
+mod floor;
+mod floorf;
+mod fma;
+mod fmaf;
+mod fmax;
+mod fmaxf;
+mod fmin;
+mod fminf;
+mod fmod;
+mod fmodf;
+mod frexp;
+mod frexpf;
+mod hypot;
+mod hypotf;
+mod ilogb;
+mod ilogbf;
+mod j0;
+mod j0f;
+mod j1;
+mod j1f;
+mod jn;
+mod jnf;
+mod ldexp;
+mod ldexpf;
+mod lgamma;
+mod lgamma_r;
+mod lgammaf;
+mod lgammaf_r;
+mod log;
+mod log10;
+mod log10f;
+mod log1p;
+mod log1pf;
+mod log2;
+mod log2f;
+mod logf;
+mod modf;
+mod modff;
+mod pow;
+mod powf;
+mod remquo;
+mod remquof;
+mod round;
+mod roundf;
+mod scalbn;
+mod scalbnf;
+mod sin;
+mod sincos;
+mod sincosf;
+mod sinf;
+mod sinh;
+mod sinhf;
+mod sqrt;
+mod sqrtf;
+mod tan;
+mod tanf;
+mod tanh;
+mod tanhf;
+mod tgamma;
+mod tgammaf;
+mod trunc;
+mod truncf;
+
+// Use separated imports instead of {}-grouped imports for easier merging.
+pub use self::acos::acos;
+pub use self::acosf::acosf;
+pub use self::acosh::acosh;
+pub use self::acoshf::acoshf;
+pub use self::asin::asin;
+pub use self::asinf::asinf;
+pub use self::asinh::asinh;
+pub use self::asinhf::asinhf;
+pub use self::atan::atan;
+pub use self::atan2::atan2;
+pub use self::atan2f::atan2f;
+pub use self::atanf::atanf;
+pub use self::atanh::atanh;
+pub use self::atanhf::atanhf;
+pub use self::cbrt::cbrt;
+pub use self::cbrtf::cbrtf;
+pub use self::ceil::ceil;
+pub use self::ceilf::ceilf;
+pub use self::copysign::copysign;
+pub use self::copysignf::copysignf;
+pub use self::cos::cos;
+pub use self::cosf::cosf;
+pub use self::cosh::cosh;
+pub use self::coshf::coshf;
+pub use self::erf::erf;
+pub use self::erf::erfc;
+pub use self::erff::erfcf;
+pub use self::erff::erff;
+pub use self::exp::exp;
+pub use self::exp10::exp10;
+pub use self::exp10f::exp10f;
+pub use self::exp2::exp2;
+pub use self::exp2f::exp2f;
+pub use self::expf::expf;
+pub use self::expm1::expm1;
+pub use self::expm1f::expm1f;
+pub use self::fabs::fabs;
+pub use self::fabsf::fabsf;
+pub use self::fdim::fdim;
+pub use self::fdimf::fdimf;
+pub use self::floor::floor;
+pub use self::floorf::floorf;
+pub use self::fma::fma;
+pub use self::fmaf::fmaf;
+pub use self::fmax::fmax;
+pub use self::fmaxf::fmaxf;
+pub use self::fmin::fmin;
+pub use self::fminf::fminf;
+pub use self::fmod::fmod;
+pub use self::fmodf::fmodf;
+pub use self::frexp::frexp;
+pub use self::frexpf::frexpf;
+pub use self::hypot::hypot;
+pub use self::hypotf::hypotf;
+pub use self::ilogb::ilogb;
+pub use self::ilogbf::ilogbf;
+pub use self::j0::j0;
+pub use self::j0::y0;
+pub use self::j0f::j0f;
+pub use self::j0f::y0f;
+pub use self::j1::j1;
+pub use self::j1::y1;
+pub use self::j1f::j1f;
+pub use self::j1f::y1f;
+pub use self::jn::jn;
+pub use self::jn::yn;
+pub use self::jnf::jnf;
+pub use self::jnf::ynf;
+pub use self::ldexp::ldexp;
+pub use self::ldexpf::ldexpf;
+pub use self::lgamma::lgamma;
+pub use self::lgamma_r::lgamma_r;
+pub use self::lgammaf::lgammaf;
+pub use self::lgammaf_r::lgammaf_r;
+pub use self::log::log;
+pub use self::log10::log10;
+pub use self::log10f::log10f;
+pub use self::log1p::log1p;
+pub use self::log1pf::log1pf;
+pub use self::log2::log2;
+pub use self::log2f::log2f;
+pub use self::logf::logf;
+pub use self::modf::modf;
+pub use self::modff::modff;
+pub use self::pow::pow;
+pub use self::powf::powf;
+pub use self::remquo::remquo;
+pub use self::remquof::remquof;
+pub use self::round::round;
+pub use self::roundf::roundf;
+pub use self::scalbn::scalbn;
+pub use self::scalbnf::scalbnf;
+pub use self::sin::sin;
+pub use self::sincos::sincos;
+pub use self::sincosf::sincosf;
+pub use self::sinf::sinf;
+pub use self::sinh::sinh;
+pub use self::sinhf::sinhf;
+pub use self::sqrt::sqrt;
+pub use self::sqrtf::sqrtf;
+pub use self::tan::tan;
+pub use self::tanf::tanf;
+pub use self::tanh::tanh;
+pub use self::tanhf::tanhf;
+pub use self::tgamma::tgamma;
+pub use self::tgammaf::tgammaf;
+pub use self::trunc::trunc;
+pub use self::truncf::truncf;
+
+// Private modules
+mod expo2;
+mod fenv;
+mod k_cos;
+mod k_cosf;
+mod k_expo2;
+mod k_expo2f;
+mod k_sin;
+mod k_sinf;
+mod k_tan;
+mod k_tanf;
+mod rem_pio2;
+mod rem_pio2_large;
+mod rem_pio2f;
+
+// Private re-imports
+use self::expo2::expo2;
+use self::k_cos::k_cos;
+use self::k_cosf::k_cosf;
+use self::k_expo2::k_expo2;
+use self::k_expo2f::k_expo2f;
+use self::k_sin::k_sin;
+use self::k_sinf::k_sinf;
+use self::k_tan::k_tan;
+use self::k_tanf::k_tanf;
+use self::rem_pio2::rem_pio2;
+use self::rem_pio2_large::rem_pio2_large;
+use self::rem_pio2f::rem_pio2f;
+
+#[inline]
+fn get_high_word(x: f64) -> u32 {
+    (x.to_bits() >> 32) as u32
+}
+
+#[inline]
+fn get_low_word(x: f64) -> u32 {
+    x.to_bits() as u32
+}
+
+#[inline]
+fn with_set_high_word(f: f64, hi: u32) -> f64 {
+    let mut tmp = f.to_bits();
+    tmp &= 0x00000000_ffffffff;
+    tmp |= (hi as u64) << 32;
+    f64::from_bits(tmp)
+}
+
+#[inline]
+fn with_set_low_word(f: f64, lo: u32) -> f64 {
+    let mut tmp = f.to_bits();
+    tmp &= 0xffffffff_00000000;
+    tmp |= lo as u64;
+    f64::from_bits(tmp)
+}
+
+#[inline]
+fn combine_words(hi: u32, lo: u32) -> f64 {
+    f64::from_bits((hi as u64) << 32 | lo as u64)
+}
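+
+// A hedged sketch, not in the original source: the word helpers above
+// round-trip the IEEE-754 bit layout of an f64, high word first.
+#[cfg(test)]
+mod word_tests {
+    use super::{combine_words, get_high_word, get_low_word};
+
+    #[test]
+    fn round_trip() {
+        let x = 1.5f64; // bit pattern 0x3ff80000_00000000
+        let (hi, lo) = (get_high_word(x), get_low_word(x));
+        assert_eq!((hi, lo), (0x3ff80000, 0));
+        assert_eq!(combine_words(hi, lo), x);
+    }
+}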
+
+
\ No newline at end of file diff --git a/src/libm/math/modf.rs.html b/src/libm/math/modf.rs.html new file mode 100644 index 000000000..5853c14e0 --- /dev/null +++ b/src/libm/math/modf.rs.html @@ -0,0 +1,71 @@ +modf.rs.html -- source
+
+pub fn modf(x: f64) -> (f64, f64) {
+    let rv2: f64;
+    let mut u = x.to_bits();
+    let mask: u64;
+    let e = ((u >> 52 & 0x7ff) as i32) - 0x3ff;
+
+    /* no fractional part */
+    if e >= 52 {
+        rv2 = x;
+        if e == 0x400 && (u << 12) != 0 {
+            /* nan */
+            return (x, rv2);
+        }
+        u &= 1 << 63;
+        return (f64::from_bits(u), rv2);
+    }
+
+    /* no integral part */
+    if e < 0 {
+        u &= 1 << 63;
+        rv2 = f64::from_bits(u);
+        return (x, rv2);
+    }
+
+    mask = ((!0) >> 12) >> e;
+    if (u & mask) == 0 {
+        rv2 = x;
+        u &= 1 << 63;
+        return (f64::from_bits(u), rv2);
+    }
+    u &= !mask;
+    rv2 = f64::from_bits(u);
+    return (x - rv2, rv2);
+}
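+
+// A hedged usage sketch, not in the original source: modf splits x into
+// (fractional, integral) parts, both carrying the sign of x.
+#[cfg(test)]
+mod modf_tests {
+    use super::modf;
+
+    #[test]
+    fn split() {
+        assert_eq!(modf(3.75), (0.75, 3.0));
+        assert_eq!(modf(-3.75), (-0.75, -3.0));
+    }
+}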
+
+
\ No newline at end of file diff --git a/src/libm/math/modff.rs.html b/src/libm/math/modff.rs.html new file mode 100644 index 000000000..d40c0af25 --- /dev/null +++ b/src/libm/math/modff.rs.html @@ -0,0 +1,69 @@ +modff.rs.html -- source
+
+pub fn modff(x: f32) -> (f32, f32) {
+    let rv2: f32;
+    let mut u: u32 = x.to_bits();
+    let mask: u32;
+    let e = ((u >> 23 & 0xff) as i32) - 0x7f;
+
+    /* no fractional part */
+    if e >= 23 {
+        rv2 = x;
+        if e == 0x80 && (u << 9) != 0 {
+            /* nan */
+            return (x, rv2);
+        }
+        u &= 0x80000000;
+        return (f32::from_bits(u), rv2);
+    }
+    /* no integral part */
+    if e < 0 {
+        u &= 0x80000000;
+        rv2 = f32::from_bits(u);
+        return (x, rv2);
+    }
+
+    mask = 0x007fffff >> e;
+    if (u & mask) == 0 {
+        rv2 = x;
+        u &= 0x80000000;
+        return (f32::from_bits(u), rv2);
+    }
+    u &= !mask;
+    rv2 = f32::from_bits(u);
+    return (x - rv2, rv2);
+}
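+
+// Illustrative usage (ours, not part of the original source): when there
+// is no integral part, the integral result keeps the sign of the input.
+#[test]
+fn keeps_sign_of_zero() {
+    let (frac, int) = modff(-0.5);
+    assert_eq!(frac, -0.5);
+    assert_eq!(int, 0.0); // -0.0 == 0.0 numerically...
+    assert!(int.is_sign_negative()); // ...but the sign bit is preserved
+}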
+
+
\ No newline at end of file diff --git a/src/libm/math/pow.rs.html b/src/libm/math/pow.rs.html new file mode 100644 index 000000000..5144161aa --- /dev/null +++ b/src/libm/math/pow.rs.html @@ -0,0 +1,1271 @@ +pow.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/e_pow.c */
+/*
+ * ====================================================
+ * Copyright (C) 2004 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+// pow(x,y) returns x**y
+//
+//                    n
+// Method:  Let x =  2   * (1+f)
+//      1. Compute and return log2(x) in two pieces:
+//              log2(x) = w1 + w2,
+//         where w1 has 53-24 = 29 bit trailing zeros.
+//      2. Perform y*log2(x) = n+y' by simulating multi-precision
+//         arithmetic, where |y'|<=0.5.
+//      3. Return x**y = 2**n*exp(y'*log2)
+//
+// Special cases:
+//      1.  (anything) ** 0  is 1
+//      2.  1 ** (anything)  is 1
+//      3.  (anything except 1) ** NAN is NAN
+//      4.  NAN ** (anything except 0) is NAN
+//      5.  +-(|x| > 1) **  +INF is +INF
+//      6.  +-(|x| > 1) **  -INF is +0
+//      7.  +-(|x| < 1) **  +INF is +0
+//      8.  +-(|x| < 1) **  -INF is +INF
+//      9.  -1          ** +-INF is 1
+//      10. +0 ** (+anything except 0, NAN)               is +0
+//      11. -0 ** (+anything except 0, NAN, odd integer)  is +0
+//      12. +0 ** (-anything except 0, NAN)               is +INF, raise divbyzero
+//      13. -0 ** (-anything except 0, NAN, odd integer)  is +INF, raise divbyzero
+//      14. -0 ** (+odd integer) is -0
+//      15. -0 ** (-odd integer) is -INF, raise divbyzero
+//      16. +INF ** (+anything except 0,NAN) is +INF
+//      17. +INF ** (-anything except 0,NAN) is +0
+//      18. -INF ** (+odd integer) is -INF
+//      19. -INF ** (anything) = -0 ** (-anything), (anything except odd integer)
+//      20. (anything) ** 1 is (anything)
+//      21. (anything) ** -1 is 1/(anything)
+//      22. (-anything) ** (integer) is (-1)**(integer)*(+anything**integer)
+//      23. (-anything except 0 and inf) ** (non-integer) is NAN
+//
+// Accuracy:
+//      pow(x,y) returns x**y nearly rounded. In particular
+//                      pow(integer,integer)
+//      always returns the correct integer provided it is
+//      representable.
+//
+// Constants :
+// The hexadecimal values are the intended ones for the following
+// constants. The decimal values may be used, provided that the
+// compiler will convert from decimal to binary accurately enough
+// to produce the hexadecimal values shown.
+//
+use super::{fabs, get_high_word, scalbn, sqrt, with_set_high_word, with_set_low_word};
+
+const BP: [f64; 2] = [1.0, 1.5];
+const DP_H: [f64; 2] = [0.0, 5.84962487220764160156e-01]; /* 0x3fe2b803_40000000 */
+const DP_L: [f64; 2] = [0.0, 1.35003920212974897128e-08]; /* 0x3E4CFDEB, 0x43CFD006 */
+const TWO53: f64 = 9007199254740992.0; /* 0x43400000_00000000 */
+const HUGE: f64 = 1.0e300;
+const TINY: f64 = 1.0e-300;
+
+// poly coefs for (3/2)*(log(x)-2s-2/3*s**3):
+const L1: f64 = 5.99999999999994648725e-01; /* 0x3fe33333_33333303 */
+const L2: f64 = 4.28571428578550184252e-01; /* 0x3fdb6db6_db6fabff */
+const L3: f64 = 3.33333329818377432918e-01; /* 0x3fd55555_518f264d */
+const L4: f64 = 2.72728123808534006489e-01; /* 0x3fd17460_a91d4101 */
+const L5: f64 = 2.30660745775561754067e-01; /* 0x3fcd864a_93c9db65 */
+const L6: f64 = 2.06975017800338417784e-01; /* 0x3fca7e28_4a454eef */
+const P1: f64 = 1.66666666666666019037e-01; /* 0x3fc55555_5555553e */
+const P2: f64 = -2.77777777770155933842e-03; /* 0xbf66c16c_16bebd93 */
+const P3: f64 = 6.61375632143793436117e-05; /* 0x3f11566a_af25de2c */
+const P4: f64 = -1.65339022054652515390e-06; /* 0xbebbbd41_c5d26bf1 */
+const P5: f64 = 4.13813679705723846039e-08; /* 0x3e663769_72bea4d0 */
+const LG2: f64 = 6.93147180559945286227e-01; /* 0x3fe62e42_fefa39ef */
+const LG2_H: f64 = 6.93147182464599609375e-01; /* 0x3fe62e43_00000000 */
+const LG2_L: f64 = -1.90465429995776804525e-09; /* 0xbe205c61_0ca86c39 */
+const OVT: f64 = 8.0085662595372944372e-017; /* -(1024-log2(ovfl+.5ulp)) */
+const CP: f64 = 9.61796693925975554329e-01; /* 0x3feec709_dc3a03fd =2/(3ln2) */
+const CP_H: f64 = 9.61796700954437255859e-01; /* 0x3feec709_e0000000 =(float)cp */
+const CP_L: f64 = -7.02846165095275826516e-09; /* 0xbe3e2fe0_145b01f5 =tail of cp_h*/
+const IVLN2: f64 = 1.44269504088896338700e+00; /* 0x3ff71547_652b82fe =1/ln2 */
+const IVLN2_H: f64 = 1.44269502162933349609e+00; /* 0x3ff71547_60000000 =24b 1/ln2*/
+const IVLN2_L: f64 = 1.92596299112661746887e-08; /* 0x3e54ae0b_f85ddf44 =1/ln2 tail*/
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn pow(x: f64, y: f64) -> f64 {
+    let t1: f64;
+    let t2: f64;
+
+    let (hx, lx): (i32, u32) = ((x.to_bits() >> 32) as i32, x.to_bits() as u32);
+    let (hy, ly): (i32, u32) = ((y.to_bits() >> 32) as i32, y.to_bits() as u32);
+
+    let mut ix: i32 = (hx & 0x7fffffff) as i32;
+    let iy: i32 = (hy & 0x7fffffff) as i32;
+
+    /* x**0 = 1, even if x is NaN */
+    if ((iy as u32) | ly) == 0 {
+        return 1.0;
+    }
+
+    /* 1**y = 1, even if y is NaN */
+    if hx == 0x3ff00000 && lx == 0 {
+        return 1.0;
+    }
+
+    /* NaN if either arg is NaN */
+    if ix > 0x7ff00000
+        || (ix == 0x7ff00000 && lx != 0)
+        || iy > 0x7ff00000
+        || (iy == 0x7ff00000 && ly != 0)
+    {
+        return x + y;
+    }
+
+    /* determine if y is an odd int when x < 0
+     * yisint = 0       ... y is not an integer
+     * yisint = 1       ... y is an odd int
+     * yisint = 2       ... y is an even int
+     */
+    let mut yisint: i32 = 0;
+    let mut k: i32;
+    let mut j: i32;
+    if hx < 0 {
+        if iy >= 0x43400000 {
+            yisint = 2; /* even integer y */
+        } else if iy >= 0x3ff00000 {
+            k = (iy >> 20) - 0x3ff; /* exponent */
+
+            if k > 20 {
+                j = (ly >> (52 - k)) as i32;
+
+                if (j << (52 - k)) == (ly as i32) {
+                    yisint = 2 - (j & 1);
+                }
+            } else if ly == 0 {
+                j = iy >> (20 - k);
+
+                if (j << (20 - k)) == iy {
+                    yisint = 2 - (j & 1);
+                }
+            }
+        }
+    }
+
+    if ly == 0 {
+        /* special value of y */
+        if iy == 0x7ff00000 {
+            /* y is +-inf */
+
+            return if ((ix - 0x3ff00000) | (lx as i32)) == 0 {
+                /* (-1)**+-inf is 1 */
+                1.0
+            } else if ix >= 0x3ff00000 {
+                /* (|x|>1)**+-inf = inf,0 */
+                if hy >= 0 {
+                    y
+                } else {
+                    0.0
+                }
+            } else {
+                /* (|x|<1)**+-inf = 0,inf */
+                if hy >= 0 {
+                    0.0
+                } else {
+                    -y
+                }
+            };
+        }
+
+        if iy == 0x3ff00000 {
+            /* y is +-1 */
+            return if hy >= 0 { x } else { 1.0 / x };
+        }
+
+        if hy == 0x40000000 {
+            /* y is 2 */
+            return x * x;
+        }
+
+        if hy == 0x3fe00000 {
+            /* y is 0.5 */
+            if hx >= 0 {
+                /* x >= +0 */
+                return sqrt(x);
+            }
+        }
+    }
+
+    let mut ax: f64 = fabs(x);
+    if lx == 0 {
+        /* special value of x */
+        if ix == 0x7ff00000 || ix == 0 || ix == 0x3ff00000 {
+            /* x is +-0,+-inf,+-1 */
+            let mut z: f64 = ax;
+
+            if hy < 0 {
+                /* z = (1/|x|) */
+                z = 1.0 / z;
+            }
+
+            if hx < 0 {
+                if ((ix - 0x3ff00000) | yisint) == 0 {
+                    z = (z - z) / (z - z); /* (-1)**non-int is NaN */
+                } else if yisint == 1 {
+                    z = -z; /* (x<0)**odd = -(|x|**odd) */
+                }
+            }
+
+            return z;
+        }
+    }
+
+    let mut s: f64 = 1.0; /* sign of result */
+    if hx < 0 {
+        if yisint == 0 {
+            /* (x<0)**(non-int) is NaN */
+            return (x - x) / (x - x);
+        }
+
+        if yisint == 1 {
+            /* (x<0)**(odd int) */
+            s = -1.0;
+        }
+    }
+
+    /* |y| is HUGE */
+    if iy > 0x41e00000 {
+        /* if |y| > 2**31 */
+        if iy > 0x43f00000 {
+            /* if |y| > 2**64, must o/uflow */
+            if ix <= 0x3fefffff {
+                return if hy < 0 { HUGE * HUGE } else { TINY * TINY };
+            }
+
+            if ix >= 0x3ff00000 {
+                return if hy > 0 { HUGE * HUGE } else { TINY * TINY };
+            }
+        }
+
+        /* over/underflow if x is not close to one */
+        if ix < 0x3fefffff {
+            return if hy < 0 {
+                s * HUGE * HUGE
+            } else {
+                s * TINY * TINY
+            };
+        }
+        if ix > 0x3ff00000 {
+            return if hy > 0 {
+                s * HUGE * HUGE
+            } else {
+                s * TINY * TINY
+            };
+        }
+
+        /* now |1-x| is TINY <= 2**-20; it suffices to compute
+        log(x) by x-x^2/2+x^3/3-x^4/4 */
+        let t: f64 = ax - 1.0; /* t has 20 trailing zeros */
+        let w: f64 = (t * t) * (0.5 - t * (0.3333333333333333333333 - t * 0.25));
+        let u: f64 = IVLN2_H * t; /* ivln2_h has 21 sig. bits */
+        let v: f64 = t * IVLN2_L - w * IVLN2;
+        t1 = with_set_low_word(u + v, 0);
+        t2 = v - (t1 - u);
+    } else {
+        // double ss,s2,s_h,s_l,t_h,t_l;
+        let mut n: i32 = 0;
+
+        if ix < 0x00100000 {
+            /* take care of subnormal numbers */
+            ax *= TWO53;
+            n -= 53;
+            ix = get_high_word(ax) as i32;
+        }
+
+        n += (ix >> 20) - 0x3ff;
+        j = ix & 0x000fffff;
+
+        /* determine interval */
+        let k: i32;
+        ix = j | 0x3ff00000; /* normalize ix */
+        if j <= 0x3988E {
+            /* |x|<sqrt(3/2) */
+            k = 0;
+        } else if j < 0xBB67A {
+            /* |x|<sqrt(3)   */
+            k = 1;
+        } else {
+            k = 0;
+            n += 1;
+            ix -= 0x00100000;
+        }
+        ax = with_set_high_word(ax, ix as u32);
+
+        /* compute ss = s_h+s_l = (x-1)/(x+1) or (x-1.5)/(x+1.5) */
+        let u: f64 = ax - BP[k as usize]; /* bp[0]=1.0, bp[1]=1.5 */
+        let v: f64 = 1.0 / (ax + BP[k as usize]);
+        let ss: f64 = u * v;
+        let s_h = with_set_low_word(ss, 0);
+
+        /* t_h=ax+bp[k] High */
+        let t_h: f64 = with_set_high_word(
+            0.0,
+            ((ix as u32 >> 1) | 0x20000000) + 0x00080000 + ((k as u32) << 18),
+        );
+        let t_l: f64 = ax - (t_h - BP[k as usize]);
+        let s_l: f64 = v * ((u - s_h * t_h) - s_h * t_l);
+
+        /* compute log(ax) */
+        let s2: f64 = ss * ss;
+        let mut r: f64 = s2 * s2 * (L1 + s2 * (L2 + s2 * (L3 + s2 * (L4 + s2 * (L5 + s2 * L6)))));
+        r += s_l * (s_h + ss);
+        let s2: f64 = s_h * s_h;
+        let t_h: f64 = with_set_low_word(3.0 + s2 + r, 0);
+        let t_l: f64 = r - ((t_h - 3.0) - s2);
+
+        /* u+v = ss*(1+...) */
+        let u: f64 = s_h * t_h;
+        let v: f64 = s_l * t_h + t_l * ss;
+
+        /* 2/(3log2)*(ss+...) */
+        let p_h: f64 = with_set_low_word(u + v, 0);
+        let p_l = v - (p_h - u);
+        let z_h: f64 = CP_H * p_h; /* cp_h+cp_l = 2/(3*log2) */
+        let z_l: f64 = CP_L * p_h + p_l * CP + DP_L[k as usize];
+
+        /* log2(ax) = (ss+..)*2/(3*log2) = n + dp_h + z_h + z_l */
+        let t: f64 = n as f64;
+        t1 = with_set_low_word(((z_h + z_l) + DP_H[k as usize]) + t, 0);
+        t2 = z_l - (((t1 - t) - DP_H[k as usize]) - z_h);
+    }
+
+    /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */
+    let y1: f64 = with_set_low_word(y, 0);
+    let p_l: f64 = (y - y1) * t1 + y * t2;
+    let mut p_h: f64 = y1 * t1;
+    let z: f64 = p_l + p_h;
+    let mut j: i32 = (z.to_bits() >> 32) as i32;
+    let i: i32 = z.to_bits() as i32;
+    // let (j, i): (i32, i32) = ((z.to_bits() >> 32) as i32, z.to_bits() as i32);
+
+    if j >= 0x40900000 {
+        /* z >= 1024 */
+        if (j - 0x40900000) | i != 0 {
+            /* if z > 1024 */
+            return s * HUGE * HUGE; /* overflow */
+        }
+
+        if p_l + OVT > z - p_h {
+            return s * HUGE * HUGE; /* overflow */
+        }
+    } else if (j & 0x7fffffff) >= 0x4090cc00 {
+        /* z <= -1075 */
+        // FIXME: instead of abs(j) use unsigned j
+
+        if (((j as u32) - 0xc090cc00) | (i as u32)) != 0 {
+            /* z < -1075 */
+            return s * TINY * TINY; /* underflow */
+        }
+
+        if p_l <= z - p_h {
+            return s * TINY * TINY; /* underflow */
+        }
+    }
+
+    /* compute 2**(p_h+p_l) */
+    let i: i32 = j & (0x7fffffff as i32);
+    k = (i >> 20) - 0x3ff;
+    let mut n: i32 = 0;
+
+    if i > 0x3fe00000 {
+        /* if |z| > 0.5, set n = [z+0.5] */
+        n = j + (0x00100000 >> (k + 1));
+        k = ((n & 0x7fffffff) >> 20) - 0x3ff; /* new k for n */
+        let t: f64 = with_set_high_word(0.0, (n & !(0x000fffff >> k)) as u32);
+        n = ((n & 0x000fffff) | 0x00100000) >> (20 - k);
+        if j < 0 {
+            n = -n;
+        }
+        p_h -= t;
+    }
+
+    let t: f64 = with_set_low_word(p_l + p_h, 0);
+    let u: f64 = t * LG2_H;
+    let v: f64 = (p_l - (t - p_h)) * LG2 + t * LG2_L;
+    let mut z: f64 = u + v;
+    let w: f64 = v - (z - u);
+    let t: f64 = z * z;
+    let t1: f64 = z - t * (P1 + t * (P2 + t * (P3 + t * (P4 + t * P5))));
+    let r: f64 = (z * t1) / (t1 - 2.0) - (w + z * w);
+    z = 1.0 - (r - z);
+    j = get_high_word(z) as i32;
+    j += n << 20;
+
+    if (j >> 20) <= 0 {
+        /* subnormal output */
+        z = scalbn(z, n);
+    } else {
+        z = with_set_high_word(z, j as u32);
+    }
+
+    s * z
+}
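+
+// The sketch below (ours, not part of the original source) restates the
+// `yisint` classification from `pow` on the float value itself rather
+// than on its raw bits; `classify` is a hypothetical helper name.
+#[cfg(test)]
+mod yisint_sketch {
+    // 0 = not an integer, 1 = odd integer, 2 = even integer -- the same
+    // convention `pow` uses when deciding the sign of (x<0)**y.
+    fn classify(y: f64) -> i32 {
+        if super::fabs(y) >= 9007199254740992.0 {
+            2 // |y| >= 2**53 (or inf): every such float is an even integer
+        } else if y == (y as i64) as f64 {
+            // the cast is exact here since |y| < 2**53; NaN fails the test
+            if (y as i64) & 1 == 1 { 1 } else { 2 }
+        } else {
+            0 // not an integer
+        }
+    }
+
+    #[test]
+    fn matches_the_convention() {
+        assert_eq!(classify(3.0), 1);
+        assert_eq!(classify(-4.0), 2);
+        assert_eq!(classify(0.5), 0);
+        assert_eq!(classify(1e300), 2);
+    }
+}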
+
+#[cfg(test)]
+mod tests {
+    extern crate core;
+
+    use self::core::f64::consts::{E, PI};
+    use self::core::f64::{EPSILON, INFINITY, MAX, MIN, MIN_POSITIVE, NAN, NEG_INFINITY};
+    use super::pow;
+
+    const POS_ZERO: &[f64] = &[0.0];
+    const NEG_ZERO: &[f64] = &[-0.0];
+    const POS_ONE: &[f64] = &[1.0];
+    const NEG_ONE: &[f64] = &[-1.0];
+    const POS_FLOATS: &[f64] = &[99.0 / 70.0, E, PI];
+    const NEG_FLOATS: &[f64] = &[-99.0 / 70.0, -E, -PI];
+    const POS_SMALL_FLOATS: &[f64] = &[(1.0 / 2.0), MIN_POSITIVE, EPSILON];
+    const NEG_SMALL_FLOATS: &[f64] = &[-(1.0 / 2.0), -MIN_POSITIVE, -EPSILON];
+    const POS_EVENS: &[f64] = &[2.0, 6.0, 8.0, 10.0, 22.0, 100.0, MAX];
+    const NEG_EVENS: &[f64] = &[MIN, -100.0, -22.0, -10.0, -8.0, -6.0, -2.0];
+    const POS_ODDS: &[f64] = &[3.0, 7.0];
+    const NEG_ODDS: &[f64] = &[-7.0, -3.0];
+    const NANS: &[f64] = &[NAN];
+    const POS_INF: &[f64] = &[INFINITY];
+    const NEG_INF: &[f64] = &[NEG_INFINITY];
+
+    const ALL: &[&[f64]] = &[
+        POS_ZERO,
+        NEG_ZERO,
+        NANS,
+        NEG_SMALL_FLOATS,
+        POS_SMALL_FLOATS,
+        NEG_FLOATS,
+        POS_FLOATS,
+        NEG_EVENS,
+        POS_EVENS,
+        NEG_ODDS,
+        POS_ODDS,
+        NEG_INF,
+        POS_INF,
+        NEG_ONE,
+        POS_ONE,
+    ];
+    const POS: &[&[f64]] = &[POS_ZERO, POS_ODDS, POS_ONE, POS_FLOATS, POS_EVENS, POS_INF];
+    const NEG: &[&[f64]] = &[NEG_ZERO, NEG_ODDS, NEG_ONE, NEG_FLOATS, NEG_EVENS, NEG_INF];
+
+    fn pow_test(base: f64, exponent: f64, expected: f64) {
+        let res = pow(base, exponent);
+        assert!(
+            if expected.is_nan() {
+                res.is_nan()
+            } else {
+                pow(base, exponent) == expected
+            },
+            "{} ** {} was {} instead of {}",
+            base,
+            exponent,
+            res,
+            expected
+        );
+    }
+
+    fn test_sets_as_base(sets: &[&[f64]], exponent: f64, expected: f64) {
+        sets.iter()
+            .for_each(|s| s.iter().for_each(|val| pow_test(*val, exponent, expected)));
+    }
+
+    fn test_sets_as_exponent(base: f64, sets: &[&[f64]], expected: f64) {
+        sets.iter()
+            .for_each(|s| s.iter().for_each(|val| pow_test(base, *val, expected)));
+    }
+
+    fn test_sets(sets: &[&[f64]], computed: &dyn Fn(f64) -> f64, expected: &dyn Fn(f64) -> f64) {
+        sets.iter().for_each(|s| {
+            s.iter().for_each(|val| {
+                let exp = expected(*val);
+                let res = computed(*val);
+
+                assert!(
+                    if exp.is_nan() {
+                        res.is_nan()
+                    } else {
+                        exp == res
+                    },
+                    "test for {} was {} instead of {}",
+                    val,
+                    res,
+                    exp
+                );
+            })
+        });
+    }
+
+    #[test]
+    fn zero_as_exponent() {
+        test_sets_as_base(ALL, 0.0, 1.0);
+        test_sets_as_base(ALL, -0.0, 1.0);
+    }
+
+    #[test]
+    fn one_as_base() {
+        test_sets_as_exponent(1.0, ALL, 1.0);
+    }
+
+    #[test]
+    fn nan_inputs() {
+        // NAN as the base:
+        // (NAN ^ anything *but 0* should be NAN)
+        test_sets_as_exponent(NAN, &ALL[2..], NAN);
+
+        // NAN as the exponent:
+        // (anything *but 1* ^ NAN should be NAN)
+        test_sets_as_base(&ALL[..(ALL.len() - 2)], NAN, NAN);
+    }
+
+    #[test]
+    fn infinity_as_base() {
+        // Positive Infinity as the base:
+        // (+Infinity ^ positive anything but 0 and NAN should be +Infinity)
+        test_sets_as_exponent(INFINITY, &POS[1..], INFINITY);
+
+        // (+Infinity ^ negative anything except 0 and NAN should be 0.0)
+        test_sets_as_exponent(INFINITY, &NEG[1..], 0.0);
+
+        // Negative Infinity as the base:
+        // (-Infinity ^ positive odd ints should be -Infinity)
+        test_sets_as_exponent(NEG_INFINITY, &[POS_ODDS], NEG_INFINITY);
+
+        // (-Infinity ^ anything but odd ints should be == -0 ^ (-anything))
+        // We can lump in pos/neg odd ints here: the division by zero this
+        // implies is a float division, which yields an infinity, not a panic.
+        test_sets(ALL, &|v: f64| pow(NEG_INFINITY, v), &|v: f64| pow(-0.0, -v));
+    }
+
+    #[test]
+    fn infinity_as_exponent() {
+        // Positive/Negative base greater than 1:
+        // (pos/neg > 1 ^ Infinity should be Infinity - note this excludes NAN as the base)
+        test_sets_as_base(&ALL[5..(ALL.len() - 2)], INFINITY, INFINITY);
+
+        // (pos/neg > 1 ^ -Infinity should be 0.0)
+        test_sets_as_base(&ALL[5..ALL.len() - 2], NEG_INFINITY, 0.0);
+
+        // Positive/Negative base less than 1:
+        let base_below_one = &[POS_ZERO, NEG_ZERO, NEG_SMALL_FLOATS, POS_SMALL_FLOATS];
+
+        // (pos/neg < 1 ^ Infinity should be 0.0 - this also excludes NAN as the base)
+        test_sets_as_base(base_below_one, INFINITY, 0.0);
+
+        // (pos/neg < 1 ^ -Infinity should be Infinity)
+        test_sets_as_base(base_below_one, NEG_INFINITY, INFINITY);
+
+        // Positive/Negative 1 as the base:
+        // (pos/neg 1 ^ Infinity should be 1)
+        test_sets_as_base(&[NEG_ONE, POS_ONE], INFINITY, 1.0);
+
+        // (pos/neg 1 ^ -Infinity should be 1)
+        test_sets_as_base(&[NEG_ONE, POS_ONE], NEG_INFINITY, 1.0);
+    }
+
+    #[test]
+    fn zero_as_base() {
+        // Positive Zero as the base:
+        // (+0 ^ anything positive but 0 and NAN should be +0)
+        test_sets_as_exponent(0.0, &POS[1..], 0.0);
+
+        // (+0 ^ anything negative but 0 and NAN should be Infinity)
+        // (no panic: float division by zero yields Infinity)
+        test_sets_as_exponent(0.0, &NEG[1..], INFINITY);
+
+        // Negative Zero as the base:
+        // (-0 ^ anything positive but 0, NAN, and odd ints should be +0)
+        test_sets_as_exponent(-0.0, &POS[3..], 0.0);
+
+        // (-0 ^ anything negative but 0, NAN, and odd ints should be Infinity)
+        // (no panic: float division by zero yields Infinity)
+        test_sets_as_exponent(-0.0, &NEG[3..], INFINITY);
+
+        // (-0 ^ positive odd ints should be -0)
+        test_sets_as_exponent(-0.0, &[POS_ODDS], -0.0);
+
+        // (-0 ^ negative odd ints should be -Infinity)
+        // (no panic: float division by zero yields -Infinity)
+        test_sets_as_exponent(-0.0, &[NEG_ODDS], NEG_INFINITY);
+    }
+
+    #[test]
+    fn special_cases() {
+        // One as the exponent:
+        // (anything ^ 1 should be anything - i.e. the base)
+        test_sets(ALL, &|v: f64| pow(v, 1.0), &|v: f64| v);
+
+        // Negative One as the exponent:
+        // (anything ^ -1 should be 1/anything)
+        test_sets(ALL, &|v: f64| pow(v, -1.0), &|v: f64| 1.0 / v);
+
+        // Factoring -1 out:
+        // (negative anything ^ integer should be (-1 ^ integer) * (positive anything ^ integer))
+        [POS_ZERO, NEG_ZERO, POS_ONE, NEG_ONE, POS_EVENS, NEG_EVENS]
+            .iter()
+            .for_each(|int_set| {
+                int_set.iter().for_each(|int| {
+                    test_sets(ALL, &|v: f64| pow(-v, *int), &|v: f64| {
+                        pow(-1.0, *int) * pow(v, *int)
+                    });
+                })
+            });
+
+        // Negative base (imaginary results):
+        // (-anything except 0 and Infinity ^ non-integer should be NAN)
+        NEG[1..(NEG.len() - 1)].iter().for_each(|set| {
+            set.iter().for_each(|val| {
+                test_sets(&ALL[3..7], &|v: f64| pow(*val, v), &|_| NAN);
+            })
+        });
+    }
+
+    #[test]
+    fn normal_cases() {
+        assert_eq!(pow(2.0, 20.0), (1 << 20) as f64);
+        assert_eq!(pow(-1.0, 9.0), -1.0);
+        assert!(pow(-1.0, 2.2).is_nan());
+        assert!(pow(-1.0, -1.14).is_nan());
+    }
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/powf.rs.html b/src/libm/math/powf.rs.html new file mode 100644 index 000000000..3d149dc0e --- /dev/null +++ b/src/libm/math/powf.rs.html @@ -0,0 +1,689 @@ +powf.rs.html -- source
+
+/* origin: FreeBSD /usr/src/lib/msun/src/e_powf.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::{fabsf, scalbnf, sqrtf};
+
+const BP: [f32; 2] = [1.0, 1.5];
+const DP_H: [f32; 2] = [0.0, 5.84960938e-01]; /* 0x3f15c000 */
+const DP_L: [f32; 2] = [0.0, 1.56322085e-06]; /* 0x35d1cfdc */
+const TWO24: f32 = 16777216.0; /* 0x4b800000 */
+const HUGE: f32 = 1.0e30;
+const TINY: f32 = 1.0e-30;
+const L1: f32 = 6.0000002384e-01; /* 0x3f19999a */
+const L2: f32 = 4.2857143283e-01; /* 0x3edb6db7 */
+const L3: f32 = 3.3333334327e-01; /* 0x3eaaaaab */
+const L4: f32 = 2.7272811532e-01; /* 0x3e8ba305 */
+const L5: f32 = 2.3066075146e-01; /* 0x3e6c3255 */
+const L6: f32 = 2.0697501302e-01; /* 0x3e53f142 */
+const P1: f32 = 1.6666667163e-01; /* 0x3e2aaaab */
+const P2: f32 = -2.7777778450e-03; /* 0xbb360b61 */
+const P3: f32 = 6.6137559770e-05; /* 0x388ab355 */
+const P4: f32 = -1.6533901999e-06; /* 0xb5ddea0e */
+const P5: f32 = 4.1381369442e-08; /* 0x3331bb4c */
+const LG2: f32 = 6.9314718246e-01; /* 0x3f317218 */
+const LG2_H: f32 = 6.93145752e-01; /* 0x3f317200 */
+const LG2_L: f32 = 1.42860654e-06; /* 0x35bfbe8c */
+const OVT: f32 = 4.2995665694e-08; /* -(128-log2(ovfl+.5ulp)) */
+const CP: f32 = 9.6179670095e-01; /* 0x3f76384f =2/(3ln2) */
+const CP_H: f32 = 9.6191406250e-01; /* 0x3f764000 =12b cp */
+const CP_L: f32 = -1.1736857402e-04; /* 0xb8f623c6 =tail of cp_h */
+const IVLN2: f32 = 1.4426950216e+00;
+const IVLN2_H: f32 = 1.4426879883e+00;
+const IVLN2_L: f32 = 7.0526075433e-06;
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn powf(x: f32, y: f32) -> f32 {
+    let mut z: f32;
+    let mut ax: f32;
+    let z_h: f32;
+    let z_l: f32;
+    let mut p_h: f32;
+    let mut p_l: f32;
+    let y1: f32;
+    let mut t1: f32;
+    let t2: f32;
+    let mut r: f32;
+    let s: f32;
+    let mut sn: f32;
+    let mut t: f32;
+    let mut u: f32;
+    let mut v: f32;
+    let mut w: f32;
+    let i: i32;
+    let mut j: i32;
+    let mut k: i32;
+    let mut yisint: i32;
+    let mut n: i32;
+    let hx: i32;
+    let hy: i32;
+    let mut ix: i32;
+    let iy: i32;
+    let mut is: i32;
+
+    hx = x.to_bits() as i32;
+    hy = y.to_bits() as i32;
+
+    ix = hx & 0x7fffffff;
+    iy = hy & 0x7fffffff;
+
+    /* x**0 = 1, even if x is NaN */
+    if iy == 0 {
+        return 1.0;
+    }
+
+    /* 1**y = 1, even if y is NaN */
+    if hx == 0x3f800000 {
+        return 1.0;
+    }
+
+    /* NaN if either arg is NaN */
+    if ix > 0x7f800000 || iy > 0x7f800000 {
+        return x + y;
+    }
+
+    /* determine if y is an odd int when x < 0
+     * yisint = 0       ... y is not an integer
+     * yisint = 1       ... y is an odd int
+     * yisint = 2       ... y is an even int
+     */
+    yisint = 0;
+    if hx < 0 {
+        if iy >= 0x4b800000 {
+            yisint = 2; /* even integer y */
+        } else if iy >= 0x3f800000 {
+            k = (iy >> 23) - 0x7f; /* exponent */
+            j = iy >> (23 - k);
+            if (j << (23 - k)) == iy {
+                yisint = 2 - (j & 1);
+            }
+        }
+    }
+
+    /* special value of y */
+    if iy == 0x7f800000 {
+        /* y is +-inf */
+        if ix == 0x3f800000 {
+            /* (-1)**+-inf is 1 */
+            return 1.0;
+        } else if ix > 0x3f800000 {
+            /* (|x|>1)**+-inf = inf,0 */
+            return if hy >= 0 { y } else { 0.0 };
+        } else {
+            /* (|x|<1)**+-inf = 0,inf */
+            return if hy >= 0 { 0.0 } else { -y };
+        }
+    }
+    if iy == 0x3f800000 {
+        /* y is +-1 */
+        return if hy >= 0 { x } else { 1.0 / x };
+    }
+
+    if hy == 0x40000000 {
+        /* y is 2 */
+        return x * x;
+    }
+
+    if hy == 0x3f000000
+       /* y is  0.5 */
+       && hx >= 0
+    {
+        /* x >= +0 */
+        return sqrtf(x);
+    }
+
+    ax = fabsf(x);
+    /* special value of x */
+    if ix == 0x7f800000 || ix == 0 || ix == 0x3f800000 {
+        /* x is +-0,+-inf,+-1 */
+        z = ax;
+        if hy < 0 {
+            /* z = (1/|x|) */
+            z = 1.0 / z;
+        }
+
+        if hx < 0 {
+            if ((ix - 0x3f800000) | yisint) == 0 {
+                z = (z - z) / (z - z); /* (-1)**non-int is NaN */
+            } else if yisint == 1 {
+                z = -z; /* (x<0)**odd = -(|x|**odd) */
+            }
+        }
+        return z;
+    }
+
+    sn = 1.0; /* sign of result */
+    if hx < 0 {
+        if yisint == 0 {
+            /* (x<0)**(non-int) is NaN */
+            return (x - x) / (x - x);
+        }
+
+        if yisint == 1 {
+            /* (x<0)**(odd int) */
+            sn = -1.0;
+        }
+    }
+
+    /* |y| is HUGE */
+    if iy > 0x4d000000 {
+        /* if |y| > 2**27 */
+        /* over/underflow if x is not close to one */
+        if ix < 0x3f7ffff8 {
+            return if hy < 0 {
+                sn * HUGE * HUGE
+            } else {
+                sn * TINY * TINY
+            };
+        }
+
+        if ix > 0x3f800007 {
+            return if hy > 0 {
+                sn * HUGE * HUGE
+            } else {
+                sn * TINY * TINY
+            };
+        }
+
+        /* now |1-x| is TINY <= 2**-20; it suffices to compute
+        log(x) by x-x^2/2+x^3/3-x^4/4 */
+        t = ax - 1.; /* t has 20 trailing zeros */
+        w = (t * t) * (0.5 - t * (0.333333333333 - t * 0.25));
+        u = IVLN2_H * t; /* IVLN2_H has 16 sig. bits */
+        v = t * IVLN2_L - w * IVLN2;
+        t1 = u + v;
+        is = t1.to_bits() as i32;
+        t1 = f32::from_bits(is as u32 & 0xfffff000);
+        t2 = v - (t1 - u);
+    } else {
+        let mut s2: f32;
+        let mut s_h: f32;
+        let s_l: f32;
+        let mut t_h: f32;
+        let mut t_l: f32;
+
+        n = 0;
+        /* take care of subnormal numbers */
+        if ix < 0x00800000 {
+            ax *= TWO24;
+            n -= 24;
+            ix = ax.to_bits() as i32;
+        }
+        n += ((ix) >> 23) - 0x7f;
+        j = ix & 0x007fffff;
+        /* determine interval */
+        ix = j | 0x3f800000; /* normalize ix */
+        if j <= 0x1cc471 {
+            /* |x|<sqrt(3/2) */
+            k = 0;
+        } else if j < 0x5db3d7 {
+            /* |x|<sqrt(3)   */
+            k = 1;
+        } else {
+            k = 0;
+            n += 1;
+            ix -= 0x00800000;
+        }
+        ax = f32::from_bits(ix as u32);
+
+        /* compute s = s_h+s_l = (x-1)/(x+1) or (x-1.5)/(x+1.5) */
+        u = ax - BP[k as usize]; /* bp[0]=1.0, bp[1]=1.5 */
+        v = 1.0 / (ax + BP[k as usize]);
+        s = u * v;
+        s_h = s;
+        is = s_h.to_bits() as i32;
+        s_h = f32::from_bits(is as u32 & 0xfffff000);
+        /* t_h=ax+bp[k] High */
+        is = (((ix as u32 >> 1) & 0xfffff000) | 0x20000000) as i32;
+        t_h = f32::from_bits(is as u32 + 0x00400000 + ((k as u32) << 21));
+        t_l = ax - (t_h - BP[k as usize]);
+        s_l = v * ((u - s_h * t_h) - s_h * t_l);
+        /* compute log(ax) */
+        s2 = s * s;
+        r = s2 * s2 * (L1 + s2 * (L2 + s2 * (L3 + s2 * (L4 + s2 * (L5 + s2 * L6)))));
+        r += s_l * (s_h + s);
+        s2 = s_h * s_h;
+        t_h = 3.0 + s2 + r;
+        is = t_h.to_bits() as i32;
+        t_h = f32::from_bits(is as u32 & 0xfffff000);
+        t_l = r - ((t_h - 3.0) - s2);
+        /* u+v = s*(1+...) */
+        u = s_h * t_h;
+        v = s_l * t_h + t_l * s;
+        /* 2/(3log2)*(s+...) */
+        p_h = u + v;
+        is = p_h.to_bits() as i32;
+        p_h = f32::from_bits(is as u32 & 0xfffff000);
+        p_l = v - (p_h - u);
+        z_h = CP_H * p_h; /* cp_h+cp_l = 2/(3*log2) */
+        z_l = CP_L * p_h + p_l * CP + DP_L[k as usize];
+        /* log2(ax) = (s+..)*2/(3*log2) = n + dp_h + z_h + z_l */
+        t = n as f32;
+        t1 = ((z_h + z_l) + DP_H[k as usize]) + t;
+        is = t1.to_bits() as i32;
+        t1 = f32::from_bits(is as u32 & 0xfffff000);
+        t2 = z_l - (((t1 - t) - DP_H[k as usize]) - z_h);
+    };
+
+    /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */
+    is = y.to_bits() as i32;
+    y1 = f32::from_bits(is as u32 & 0xfffff000);
+    p_l = (y - y1) * t1 + y * t2;
+    p_h = y1 * t1;
+    z = p_l + p_h;
+    j = z.to_bits() as i32;
+    if j > 0x43000000 {
+        /* if z > 128 */
+        return sn * HUGE * HUGE; /* overflow */
+    } else if j == 0x43000000 {
+        /* if z == 128 */
+        if p_l + OVT > z - p_h {
+            return sn * HUGE * HUGE; /* overflow */
+        }
+    } else if (j & 0x7fffffff) > 0x43160000 {
+        /* z < -150 */
+        // FIXME: check should be  (uint32_t)j > 0xc3160000
+        return sn * TINY * TINY; /* underflow */
+    } else if j as u32 == 0xc3160000
+              /* z == -150 */
+              && p_l <= z - p_h
+    {
+        return sn * TINY * TINY; /* underflow */
+    }
+
+    /*
+     * compute 2**(p_h+p_l)
+     */
+    i = j & 0x7fffffff;
+    k = (i >> 23) - 0x7f;
+    n = 0;
+    if i > 0x3f000000 {
+        /* if |z| > 0.5, set n = [z+0.5] */
+        n = j + (0x00800000 >> (k + 1));
+        k = ((n & 0x7fffffff) >> 23) - 0x7f; /* new k for n */
+        t = f32::from_bits(n as u32 & !(0x007fffff >> k));
+        n = ((n & 0x007fffff) | 0x00800000) >> (23 - k);
+        if j < 0 {
+            n = -n;
+        }
+        p_h -= t;
+    }
+    t = p_l + p_h;
+    is = t.to_bits() as i32;
+    t = f32::from_bits(is as u32 & 0xffff8000);
+    u = t * LG2_H;
+    v = (p_l - (t - p_h)) * LG2 + t * LG2_L;
+    z = u + v;
+    w = v - (z - u);
+    t = z * z;
+    t1 = z - t * (P1 + t * (P2 + t * (P3 + t * (P4 + t * P5))));
+    r = (z * t1) / (t1 - 2.0) - (w + z * w);
+    z = 1.0 - (r - z);
+    j = z.to_bits() as i32;
+    j += n << 23;
+    if (j >> 23) <= 0 {
+        /* subnormal output */
+        z = scalbnf(z, n);
+    } else {
+        z = f32::from_bits(j as u32);
+    }
+    sn * z
+}
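+
+// A few illustrative checks (ours, not part of the original source) on
+// cases powf must get exactly right per the accuracy notes for e_pow:
+#[cfg(test)]
+mod sanity {
+    use super::powf;
+
+    #[test]
+    fn exactly_representable_results() {
+        assert_eq!(powf(2.0, 10.0), 1024.0); // integer ** integer is exact
+        assert_eq!(powf(-2.0, 3.0), -8.0); // odd integer exponent keeps the sign
+        assert_eq!(powf(4.0, 0.5), 2.0); // y == 0.5 is routed to sqrtf
+        assert!(powf(-1.0, 0.5).is_nan()); // negative ** non-integer is NaN
+    }
+}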
+
+
\ No newline at end of file diff --git a/src/libm/math/rem_pio2.rs.html b/src/libm/math/rem_pio2.rs.html new file mode 100644 index 000000000..2a523fd14 --- /dev/null +++ b/src/libm/math/rem_pio2.rs.html @@ -0,0 +1,417 @@ +rem_pio2.rs.html -- source
+
+// origin: FreeBSD /usr/src/lib/msun/src/e_rem_pio2.c
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+// Optimized by Bruce D. Evans.
+use super::rem_pio2_large;
+
+// #if FLT_EVAL_METHOD==0 || FLT_EVAL_METHOD==1
+// #define EPS DBL_EPSILON
+const EPS: f64 = 2.2204460492503131e-16;
+// #elif FLT_EVAL_METHOD==2
+// #define EPS LDBL_EPSILON
+// #endif
+
+// TODO: Support FLT_EVAL_METHOD?
+
+const TO_INT: f64 = 1.5 / EPS;
+/// 53 bits of 2/pi
+const INV_PIO2: f64 = 6.36619772367581382433e-01; /* 0x3FE45F30, 0x6DC9C883 */
+/// first 33 bits of pi/2
+const PIO2_1: f64 = 1.57079632673412561417e+00; /* 0x3FF921FB, 0x54400000 */
+/// pi/2 - PIO2_1
+const PIO2_1T: f64 = 6.07710050650619224932e-11; /* 0x3DD0B461, 0x1A626331 */
+/// second 33 bits of pi/2
+const PIO2_2: f64 = 6.07710050630396597660e-11; /* 0x3DD0B461, 0x1A600000 */
+/// pi/2 - (PIO2_1+PIO2_2)
+const PIO2_2T: f64 = 2.02226624879595063154e-21; /* 0x3BA3198A, 0x2E037073 */
+/// third 33 bits of pi/2
+const PIO2_3: f64 = 2.02226624871116645580e-21; /* 0x3BA3198A, 0x2E000000 */
+/// pi/2 - (PIO2_1+PIO2_2+PIO2_3)
+const PIO2_3T: f64 = 8.47842766036889956997e-32; /* 0x397B839A, 0x252049C1 */
+
+// return the remainder of x rem pi/2 in y[0]+y[1]
+// use rem_pio2_large() for large x
+//
+// caller must handle the case when reduction is not needed: |x| ~<= pi/4
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub(crate) fn rem_pio2(x: f64) -> (i32, f64, f64) {
+    let x1p24 = f64::from_bits(0x4170000000000000);
+
+    let sign = (f64::to_bits(x) >> 63) as i32;
+    let ix = (f64::to_bits(x) >> 32) as u32 & 0x7fffffff;
+
+    #[inline]
+    fn medium(x: f64, ix: u32) -> (i32, f64, f64) {
+        /* rint(x/(pi/2)); assumes round-to-nearest */
+        let f_n = x * INV_PIO2 + TO_INT - TO_INT;
+        let n = f_n as i32;
+        let mut r = x - f_n * PIO2_1;
+        let mut w = f_n * PIO2_1T; /* 1st round, good to 85 bits */
+        let mut y0 = r - w;
+        let ui = f64::to_bits(y0);
+        let ey = (ui >> 52) as i32 & 0x7ff;
+        let ex = (ix >> 20) as i32;
+        if ex - ey > 16 {
+            /* 2nd round, good to 118 bits */
+            let t = r;
+            w = f_n * PIO2_2;
+            r = t - w;
+            w = f_n * PIO2_2T - ((t - r) - w);
+            y0 = r - w;
+            let ey = (f64::to_bits(y0) >> 52) as i32 & 0x7ff;
+            if ex - ey > 49 {
+                /* 3rd round, good to 151 bits, covers all cases */
+                let t = r;
+                w = f_n * PIO2_3;
+                r = t - w;
+                w = f_n * PIO2_3T - ((t - r) - w);
+                y0 = r - w;
+            }
+        }
+        let y1 = (r - y0) - w;
+        (n, y0, y1)
+    }
+
+    if ix <= 0x400f6a7a {
+        /* |x| ~<= 5pi/4 */
+        if (ix & 0xfffff) == 0x921fb {
+            /* |x| ~= pi/2 or 2pi/2 */
+            return medium(x, ix); /* cancellation -- use medium case */
+        }
+        if ix <= 0x4002d97c {
+            /* |x| ~<= 3pi/4 */
+            if sign == 0 {
+                let z = x - PIO2_1; /* one round good to 85 bits */
+                let y0 = z - PIO2_1T;
+                let y1 = (z - y0) - PIO2_1T;
+                return (1, y0, y1);
+            } else {
+                let z = x + PIO2_1;
+                let y0 = z + PIO2_1T;
+                let y1 = (z - y0) + PIO2_1T;
+                return (-1, y0, y1);
+            }
+        } else if sign == 0 {
+            let z = x - 2.0 * PIO2_1;
+            let y0 = z - 2.0 * PIO2_1T;
+            let y1 = (z - y0) - 2.0 * PIO2_1T;
+            return (2, y0, y1);
+        } else {
+            let z = x + 2.0 * PIO2_1;
+            let y0 = z + 2.0 * PIO2_1T;
+            let y1 = (z - y0) + 2.0 * PIO2_1T;
+            return (-2, y0, y1);
+        }
+    }
+    if ix <= 0x401c463b {
+        /* |x| ~<= 9pi/4 */
+        if ix <= 0x4015fdbc {
+            /* |x| ~<= 7pi/4 */
+            if ix == 0x4012d97c {
+                /* |x| ~= 3pi/2 */
+                return medium(x, ix);
+            }
+            if sign == 0 {
+                let z = x - 3.0 * PIO2_1;
+                let y0 = z - 3.0 * PIO2_1T;
+                let y1 = (z - y0) - 3.0 * PIO2_1T;
+                return (3, y0, y1);
+            } else {
+                let z = x + 3.0 * PIO2_1;
+                let y0 = z + 3.0 * PIO2_1T;
+                let y1 = (z - y0) + 3.0 * PIO2_1T;
+                return (-3, y0, y1);
+            }
+        } else {
+            if ix == 0x401921fb {
+                /* |x| ~= 4pi/2 */
+                return medium(x, ix);
+            }
+            if sign == 0 {
+                let z = x - 4.0 * PIO2_1;
+                let y0 = z - 4.0 * PIO2_1T;
+                let y1 = (z - y0) - 4.0 * PIO2_1T;
+                return (4, y0, y1);
+            } else {
+                let z = x + 4.0 * PIO2_1;
+                let y0 = z + 4.0 * PIO2_1T;
+                let y1 = (z - y0) + 4.0 * PIO2_1T;
+                return (-4, y0, y1);
+            }
+        }
+    }
+    if ix < 0x413921fb {
+        /* |x| ~< 2^20*(pi/2), medium size */
+        return medium(x, ix);
+    }
+    /*
+     * all other (large) arguments
+     */
+    if ix >= 0x7ff00000 {
+        /* x is inf or NaN */
+        let y0 = x - x;
+        let y1 = y0;
+        return (0, y0, y1);
+    }
+    /* set z = scalbn(|x|,-ilogb(x)+23) */
+    let mut ui = f64::to_bits(x);
+    ui &= (!1) >> 12;
+    ui |= (0x3ff + 23) << 52;
+    let mut z = f64::from_bits(ui);
+    let mut tx = [0.0; 3];
+    for i in 0..2 {
+        tx[i] = z as i32 as f64;
+        z = (z - tx[i]) * x1p24;
+    }
+    tx[2] = z;
+    /* skip zero terms, first term is non-zero */
+    let mut i = 2;
+    while i != 0 && tx[i] == 0.0 {
+        i -= 1;
+    }
+    let mut ty = [0.0; 3];
+    let n = rem_pio2_large(&tx[..=i], &mut ty, ((ix >> 20) - (0x3ff + 23)) as i32, 1);
+    if sign != 0 {
+        return (-n, -ty[0], -ty[1]);
+    }
+    (n, ty[0], ty[1])
+}
+
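+// A minimal sketch (ours, not part of the original source) of the TO_INT
+// rounding trick used by `medium` above:
+#[test]
+fn to_int_trick_rounds_to_nearest() {
+    // Adding and subtracting TO_INT = 1.5/EPS forces rounding to the
+    // nearest integer under the default round-to-nearest mode, which is
+    // how `medium` computes rint(x * INV_PIO2) without an rint routine.
+    assert_eq!(2.6 * INV_PIO2 + TO_INT - TO_INT, 2.0);
+    assert_eq!(2.2 * INV_PIO2 + TO_INT - TO_INT, 1.0);
+}
+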
+#[test]
+fn test_near_pi() {
+    assert_eq!(
+        rem_pio2(3.141592025756836),
+        (2, -6.278329573009626e-7, -2.1125998133974653e-23)
+    );
+    assert_eq!(
+        rem_pio2(3.141592033207416),
+        (2, -6.20382377148128e-7, -2.1125998133974653e-23)
+    );
+    assert_eq!(
+        rem_pio2(3.141592144966125),
+        (2, -5.086236681942706e-7, -2.1125998133974653e-23)
+    );
+    assert_eq!(
+        rem_pio2(3.141592979431152),
+        (2, 3.2584135866119817e-7, -2.1125998133974653e-23)
+    );
+}
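+
+#[test]
+fn reconstructs_the_input() {
+    // Illustrative check (ours, not part of the original source): the
+    // returned triple satisfies x ~= n*(pi/2) + y0 + y1 with |y0| <= pi/4.
+    let (n, y0, y1) = rem_pio2(10.0);
+    assert_eq!(n, 6);
+    let pio2 = 1.5707963267948966; /* pi/2 */
+    assert!((n as f64 * pio2 + y0 + y1 - 10.0).abs() < 1e-12);
+}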
+
+
\ No newline at end of file diff --git a/src/libm/math/rem_pio2_large.rs.html b/src/libm/math/rem_pio2_large.rs.html new file mode 100644 index 000000000..5f075f50b --- /dev/null +++ b/src/libm/math/rem_pio2_large.rs.html @@ -0,0 +1,945 @@ +rem_pio2_large.rs.html -- source
+
+#![allow(unused_unsafe)]
+/* origin: FreeBSD /usr/src/lib/msun/src/k_rem_pio2.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::floor;
+use super::scalbn;
+
+// initial value for jk
+const INIT_JK: [usize; 4] = [3, 4, 4, 6];
+
+// Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
+//
+//              integer array, contains the (24*i)-th to (24*i+23)-th
+//              bit of 2/pi after binary point. The corresponding
+//              floating value is
+//
+//                      ipio2[i] * 2^(-24(i+1)).
+//
+// NB: This table must have at least (e0-3)/24 + jk terms.
+//     For quad precision (e0 <= 16360, jk = 6), this is 686.
+#[cfg(target_pointer_width = "32")]
+const IPIO2: [i32; 66] = [
+    0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62, 0x95993C, 0x439041, 0xFE5163,
+    0xABDEBB, 0xC561B7, 0x246E3A, 0x424DD2, 0xE00649, 0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129,
+    0xA73EE8, 0x8235F5, 0x2EBB44, 0x84E99C, 0x7026B4, 0x5F7E41, 0x3991D6, 0x398353, 0x39F49C,
+    0x845F8B, 0xBDF928, 0x3B1FF8, 0x97FFDE, 0x05980F, 0xEF2F11, 0x8B5A0A, 0x6D1F6D, 0x367ECF,
+    0x27CB09, 0xB74F46, 0x3F669E, 0x5FEA2D, 0x7527BA, 0xC7EBE5, 0xF17B3D, 0x0739F7, 0x8A5292,
+    0xEA6BFB, 0x5FB11F, 0x8D5D08, 0x560330, 0x46FC7B, 0x6BABF0, 0xCFBC20, 0x9AF436, 0x1DA9E3,
+    0x91615E, 0xE61B08, 0x659985, 0x5F14A0, 0x68408D, 0xFFD880, 0x4D7327, 0x310606, 0x1556CA,
+    0x73A8C9, 0x60E27B, 0xC08C6B,
+];
+
+#[cfg(target_pointer_width = "64")]
+const IPIO2: [i32; 690] = [
+    0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62, 0x95993C, 0x439041, 0xFE5163,
+    0xABDEBB, 0xC561B7, 0x246E3A, 0x424DD2, 0xE00649, 0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129,
+    0xA73EE8, 0x8235F5, 0x2EBB44, 0x84E99C, 0x7026B4, 0x5F7E41, 0x3991D6, 0x398353, 0x39F49C,
+    0x845F8B, 0xBDF928, 0x3B1FF8, 0x97FFDE, 0x05980F, 0xEF2F11, 0x8B5A0A, 0x6D1F6D, 0x367ECF,
+    0x27CB09, 0xB74F46, 0x3F669E, 0x5FEA2D, 0x7527BA, 0xC7EBE5, 0xF17B3D, 0x0739F7, 0x8A5292,
+    0xEA6BFB, 0x5FB11F, 0x8D5D08, 0x560330, 0x46FC7B, 0x6BABF0, 0xCFBC20, 0x9AF436, 0x1DA9E3,
+    0x91615E, 0xE61B08, 0x659985, 0x5F14A0, 0x68408D, 0xFFD880, 0x4D7327, 0x310606, 0x1556CA,
+    0x73A8C9, 0x60E27B, 0xC08C6B, 0x47C419, 0xC367CD, 0xDCE809, 0x2A8359, 0xC4768B, 0x961CA6,
+    0xDDAF44, 0xD15719, 0x053EA5, 0xFF0705, 0x3F7E33, 0xE832C2, 0xDE4F98, 0x327DBB, 0xC33D26,
+    0xEF6B1E, 0x5EF89F, 0x3A1F35, 0xCAF27F, 0x1D87F1, 0x21907C, 0x7C246A, 0xFA6ED5, 0x772D30,
+    0x433B15, 0xC614B5, 0x9D19C3, 0xC2C4AD, 0x414D2C, 0x5D000C, 0x467D86, 0x2D71E3, 0x9AC69B,
+    0x006233, 0x7CD2B4, 0x97A7B4, 0xD55537, 0xF63ED7, 0x1810A3, 0xFC764D, 0x2A9D64, 0xABD770,
+    0xF87C63, 0x57B07A, 0xE71517, 0x5649C0, 0xD9D63B, 0x3884A7, 0xCB2324, 0x778AD6, 0x23545A,
+    0xB91F00, 0x1B0AF1, 0xDFCE19, 0xFF319F, 0x6A1E66, 0x615799, 0x47FBAC, 0xD87F7E, 0xB76522,
+    0x89E832, 0x60BFE6, 0xCDC4EF, 0x09366C, 0xD43F5D, 0xD7DE16, 0xDE3B58, 0x929BDE, 0x2822D2,
+    0xE88628, 0x4D58E2, 0x32CAC6, 0x16E308, 0xCB7DE0, 0x50C017, 0xA71DF3, 0x5BE018, 0x34132E,
+    0x621283, 0x014883, 0x5B8EF5, 0x7FB0AD, 0xF2E91E, 0x434A48, 0xD36710, 0xD8DDAA, 0x425FAE,
+    0xCE616A, 0xA4280A, 0xB499D3, 0xF2A606, 0x7F775C, 0x83C2A3, 0x883C61, 0x78738A, 0x5A8CAF,
+    0xBDD76F, 0x63A62D, 0xCBBFF4, 0xEF818D, 0x67C126, 0x45CA55, 0x36D9CA, 0xD2A828, 0x8D61C2,
+    0x77C912, 0x142604, 0x9B4612, 0xC459C4, 0x44C5C8, 0x91B24D, 0xF31700, 0xAD43D4, 0xE54929,
+    0x10D5FD, 0xFCBE00, 0xCC941E, 0xEECE70, 0xF53E13, 0x80F1EC, 0xC3E7B3, 0x28F8C7, 0x940593,
+    0x3E71C1, 0xB3092E, 0xF3450B, 0x9C1288, 0x7B20AB, 0x9FB52E, 0xC29247, 0x2F327B, 0x6D550C,
+    0x90A772, 0x1FE76B, 0x96CB31, 0x4A1679, 0xE27941, 0x89DFF4, 0x9794E8, 0x84E6E2, 0x973199,
+    0x6BED88, 0x365F5F, 0x0EFDBB, 0xB49A48, 0x6CA467, 0x427271, 0x325D8D, 0xB8159F, 0x09E5BC,
+    0x25318D, 0x3974F7, 0x1C0530, 0x010C0D, 0x68084B, 0x58EE2C, 0x90AA47, 0x02E774, 0x24D6BD,
+    0xA67DF7, 0x72486E, 0xEF169F, 0xA6948E, 0xF691B4, 0x5153D1, 0xF20ACF, 0x339820, 0x7E4BF5,
+    0x6863B2, 0x5F3EDD, 0x035D40, 0x7F8985, 0x295255, 0xC06437, 0x10D86D, 0x324832, 0x754C5B,
+    0xD4714E, 0x6E5445, 0xC1090B, 0x69F52A, 0xD56614, 0x9D0727, 0x50045D, 0xDB3BB4, 0xC576EA,
+    0x17F987, 0x7D6B49, 0xBA271D, 0x296996, 0xACCCC6, 0x5414AD, 0x6AE290, 0x89D988, 0x50722C,
+    0xBEA404, 0x940777, 0x7030F3, 0x27FC00, 0xA871EA, 0x49C266, 0x3DE064, 0x83DD97, 0x973FA3,
+    0xFD9443, 0x8C860D, 0xDE4131, 0x9D3992, 0x8C70DD, 0xE7B717, 0x3BDF08, 0x2B3715, 0xA0805C,
+    0x93805A, 0x921110, 0xD8E80F, 0xAF806C, 0x4BFFDB, 0x0F9038, 0x761859, 0x15A562, 0xBBCB61,
+    0xB989C7, 0xBD4010, 0x04F2D2, 0x277549, 0xF6B6EB, 0xBB22DB, 0xAA140A, 0x2F2689, 0x768364,
+    0x333B09, 0x1A940E, 0xAA3A51, 0xC2A31D, 0xAEEDAF, 0x12265C, 0x4DC26D, 0x9C7A2D, 0x9756C0,
+    0x833F03, 0xF6F009, 0x8C402B, 0x99316D, 0x07B439, 0x15200C, 0x5BC3D8, 0xC492F5, 0x4BADC6,
+    0xA5CA4E, 0xCD37A7, 0x36A9E6, 0x9492AB, 0x6842DD, 0xDE6319, 0xEF8C76, 0x528B68, 0x37DBFC,
+    0xABA1AE, 0x3115DF, 0xA1AE00, 0xDAFB0C, 0x664D64, 0xB705ED, 0x306529, 0xBF5657, 0x3AFF47,
+    0xB9F96A, 0xF3BE75, 0xDF9328, 0x3080AB, 0xF68C66, 0x15CB04, 0x0622FA, 0x1DE4D9, 0xA4B33D,
+    0x8F1B57, 0x09CD36, 0xE9424E, 0xA4BE13, 0xB52333, 0x1AAAF0, 0xA8654F, 0xA5C1D2, 0x0F3F0B,
+    0xCD785B, 0x76F923, 0x048B7B, 0x721789, 0x53A6C6, 0xE26E6F, 0x00EBEF, 0x584A9B, 0xB7DAC4,
+    0xBA66AA, 0xCFCF76, 0x1D02D1, 0x2DF1B1, 0xC1998C, 0x77ADC3, 0xDA4886, 0xA05DF7, 0xF480C6,
+    0x2FF0AC, 0x9AECDD, 0xBC5C3F, 0x6DDED0, 0x1FC790, 0xB6DB2A, 0x3A25A3, 0x9AAF00, 0x9353AD,
+    0x0457B6, 0xB42D29, 0x7E804B, 0xA707DA, 0x0EAA76, 0xA1597B, 0x2A1216, 0x2DB7DC, 0xFDE5FA,
+    0xFEDB89, 0xFDBE89, 0x6C76E4, 0xFCA906, 0x70803E, 0x156E85, 0xFF87FD, 0x073E28, 0x336761,
+    0x86182A, 0xEABD4D, 0xAFE7B3, 0x6E6D8F, 0x396795, 0x5BBF31, 0x48D784, 0x16DF30, 0x432DC7,
+    0x356125, 0xCE70C9, 0xB8CB30, 0xFD6CBF, 0xA200A4, 0xE46C05, 0xA0DD5A, 0x476F21, 0xD21262,
+    0x845CB9, 0x496170, 0xE0566B, 0x015299, 0x375550, 0xB7D51E, 0xC4F133, 0x5F6E13, 0xE4305D,
+    0xA92E85, 0xC3B21D, 0x3632A1, 0xA4B708, 0xD4B1EA, 0x21F716, 0xE4698F, 0x77FF27, 0x80030C,
+    0x2D408D, 0xA0CD4F, 0x99A520, 0xD3A2B3, 0x0A5D2F, 0x42F9B4, 0xCBDA11, 0xD0BE7D, 0xC1DB9B,
+    0xBD17AB, 0x81A2CA, 0x5C6A08, 0x17552E, 0x550027, 0xF0147F, 0x8607E1, 0x640B14, 0x8D4196,
+    0xDEBE87, 0x2AFDDA, 0xB6256B, 0x34897B, 0xFEF305, 0x9EBFB9, 0x4F6A68, 0xA82A4A, 0x5AC44F,
+    0xBCF82D, 0x985AD7, 0x95C7F4, 0x8D4D0D, 0xA63A20, 0x5F57A4, 0xB13F14, 0x953880, 0x0120CC,
+    0x86DD71, 0xB6DEC9, 0xF560BF, 0x11654D, 0x6B0701, 0xACB08C, 0xD0C0B2, 0x485551, 0x0EFB1E,
+    0xC37295, 0x3B06A3, 0x3540C0, 0x7BDC06, 0xCC45E0, 0xFA294E, 0xC8CAD6, 0x41F3E8, 0xDE647C,
+    0xD8649B, 0x31BED9, 0xC397A4, 0xD45877, 0xC5E369, 0x13DAF0, 0x3C3ABA, 0x461846, 0x5F7555,
+    0xF5BDD2, 0xC6926E, 0x5D2EAC, 0xED440E, 0x423E1C, 0x87C461, 0xE9FD29, 0xF3D6E7, 0xCA7C22,
+    0x35916F, 0xC5E008, 0x8DD7FF, 0xE26A6E, 0xC6FDB0, 0xC10893, 0x745D7C, 0xB2AD6B, 0x9D6ECD,
+    0x7B723E, 0x6A11C6, 0xA9CFF7, 0xDF7329, 0xBAC9B5, 0x5100B7, 0x0DB2E2, 0x24BA74, 0x607DE5,
+    0x8AD874, 0x2C150D, 0x0C1881, 0x94667E, 0x162901, 0x767A9F, 0xBEFDFD, 0xEF4556, 0x367ED9,
+    0x13D9EC, 0xB9BA8B, 0xFC97C4, 0x27A831, 0xC36EF1, 0x36C594, 0x56A8D8, 0xB5A8B4, 0x0ECCCF,
+    0x2D8912, 0x34576F, 0x89562C, 0xE3CE99, 0xB920D6, 0xAA5E6B, 0x9C2A3E, 0xCC5F11, 0x4A0BFD,
+    0xFBF4E1, 0x6D3B8E, 0x2C86E2, 0x84D4E9, 0xA9B4FC, 0xD1EEEF, 0xC9352E, 0x61392F, 0x442138,
+    0xC8D91B, 0x0AFC81, 0x6A4AFB, 0xD81C2F, 0x84B453, 0x8C994E, 0xCC2254, 0xDC552A, 0xD6C6C0,
+    0x96190B, 0xB8701A, 0x649569, 0x605A26, 0xEE523F, 0x0F117F, 0x11B5F4, 0xF5CBFC, 0x2DBC34,
+    0xEEBC34, 0xCC5DE8, 0x605EDD, 0x9B8E67, 0xEF3392, 0xB817C9, 0x9B5861, 0xBC57E1, 0xC68351,
+    0x103ED8, 0x4871DD, 0xDD1C2D, 0xA118AF, 0x462C21, 0xD7F359, 0x987AD9, 0xC0549E, 0xFA864F,
+    0xFC0656, 0xAE79E5, 0x362289, 0x22AD38, 0xDC9367, 0xAAE855, 0x382682, 0x9BE7CA, 0xA40D51,
+    0xB13399, 0x0ED7A9, 0x480569, 0xF0B265, 0xA7887F, 0x974C88, 0x36D1F9, 0xB39221, 0x4A827B,
+    0x21CF98, 0xDC9F40, 0x5547DC, 0x3A74E1, 0x42EB67, 0xDF9DFE, 0x5FD45E, 0xA4677B, 0x7AACBA,
+    0xA2F655, 0x23882B, 0x55BA41, 0x086E59, 0x862A21, 0x834739, 0xE6E389, 0xD49EE5, 0x40FB49,
+    0xE956FF, 0xCA0F1C, 0x8A59C5, 0x2BFA94, 0xC5C1D3, 0xCFC50F, 0xAE5ADB, 0x86C547, 0x624385,
+    0x3B8621, 0x94792C, 0x876110, 0x7B4C2A, 0x1A2C80, 0x12BF43, 0x902688, 0x893C78, 0xE4C4A8,
+    0x7BDBE5, 0xC23AC4, 0xEAF426, 0x8A67F7, 0xBF920D, 0x2BA365, 0xB1933D, 0x0B7CBD, 0xDC51A4,
+    0x63DD27, 0xDDE169, 0x19949A, 0x9529A8, 0x28CE68, 0xB4ED09, 0x209F44, 0xCA984E, 0x638270,
+    0x237C7E, 0x32B90F, 0x8EF5A7, 0xE75614, 0x08F121, 0x2A9DB5, 0x4D7E6F, 0x5119A5, 0xABF9B5,
+    0xD6DF82, 0x61DD96, 0x023616, 0x9F3AC4, 0xA1A283, 0x6DED72, 0x7A8D39, 0xA9B882, 0x5C326B,
+    0x5B2746, 0xED3400, 0x7700D2, 0x55F4FC, 0x4D5901, 0x8071E0,
+];
+
+const PIO2: [f64; 8] = [
+    1.57079625129699707031e+00, /* 0x3FF921FB, 0x40000000 */
+    7.54978941586159635335e-08, /* 0x3E74442D, 0x00000000 */
+    5.39030252995776476554e-15, /* 0x3CF84698, 0x80000000 */
+    3.28200341580791294123e-22, /* 0x3B78CC51, 0x60000000 */
+    1.27065575308067607349e-29, /* 0x39F01B83, 0x80000000 */
+    1.22933308981111328932e-36, /* 0x387A2520, 0x40000000 */
+    2.73370053816464559624e-44, /* 0x36E38222, 0x80000000 */
+    2.16741683877804819444e-51, /* 0x3569F31D, 0x00000000 */
+];
+
+// fn rem_pio2_large(x : &[f64], y : &mut [f64], e0 : i32, prec : usize) -> i32
+//
+// Input parameters:
+//      x[]     The input value (must be positive) is broken into nx
+//              pieces of 24-bit integers in double precision format.
+//              x[i] will be the i-th 24-bit chunk of x. The scaled exponent
+//              of x[0] is given in input parameter e0 (i.e., x[0]*2^e0
+//              matches x up to 24 bits).
+//
+//              Example of breaking a double positive z into x[0]+x[1]+x[2]:
+//                      e0 = ilogb(z)-23
+//                      z  = scalbn(z,-e0)
+//              for i = 0,1,2
+//                      x[i] = floor(z)
+//                      z    = (z-x[i])*2**24
+//
+//      y[]     output result in an array of double precision numbers.
+//              The dimension of y[] is:
+//                      24-bit  precision       1
+//                      53-bit  precision       2
+//                      64-bit  precision       2
+//                      113-bit precision       3
+//              The actual value is the sum of them. Thus for 113-bit
+//              precision, one may have to do something like:
+//
+//              long double t,w,r_head, r_tail;
+//              t = (long double)y[2] + (long double)y[1];
+//              w = (long double)y[0];
+//              r_head = t+w;
+//              r_tail = w - (r_head - t);
+//
+//      e0      The exponent of x[0]. Must be <= 16360 or you need to
+//              expand the ipio2 table.
+//
+//      prec    an integer indicating the precision:
+//                      0       24  bits (single)
+//                      1       53  bits (double)
+//                      2       64  bits (extended)
+//                      3       113 bits (quad)
+//
+// Here is the description of some local variables:
+//
+//      jk      jk+1 is the initial number of terms of ipio2[] needed
+//              in the computation. The minimum and recommended value
+//              for jk is 3,4,4,6 for single, double, extended, and quad.
+//              jk+1 must be 2 larger than you might expect so that our
+//              recomputation test works. (Up to 24 bits in the integer
+//              part (the 24 bits of it that we compute) and 23 bits in
+//              the fraction part may be lost to cancellation before we
+//              recompute.)
+//
+//      jz      local integer variable indicating the number of
+//              terms of ipio2[] used.
+//
+//      jx      nx - 1
+//
+//      jv      index for pointing to the suitable ipio2[] for the
+//              computation. In general, we want
+//                      ( 2^e0*x[0] * ipio2[jv-1]*2^(-24jv) )/8
+//              to be an integer. Thus
+//                      e0-3-24*jv >= 0 or (e0-3)/24 >= jv
+//              Hence jv = max(0,(e0-3)/24).
+//
+//      jp      jp+1 is the number of terms in PIo2[] needed, jp = jk.
+//
+//      q[]     double array with integral value, representing the
+//              24-bit chunks of the product of x and 2/pi.
+//
+//      q0      the corresponding exponent of q[0]. Note that the
+//              exponent for q[i] would be q0-24*i.
+//
+//      PIo2[]  double precision array, obtained by cutting pi/2
+//              into 24-bit chunks.
+//
+//      f[]     ipio2[] in floating point
+//
+//      iq[]    integer array obtained by breaking up q[] into 24-bit chunks.
+//
+//      fq[]    final product of x*(2/pi) in fq[0],..,fq[jk]
+//
+//      ih      integer. If >0 it indicates q[] is >= 0.5, hence
+//              it also indicates the *sign* of the result.
+
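+// Illustrative sketch (added for exposition; not part of the upstream
+// source): one way to produce the x[0..3] / e0 form described above for
+// a positive, normal double whose exponent stays well inside the normal
+// range. `ilogb` is open-coded from the IEEE-754 layout; the crate's own
+// floor() and scalbn() would serve equally well.
+#[cfg(test)]
+fn split_24bit(z: f64) -> ([f64; 3], i32) {
+    let ilogb = ((z.to_bits() >> 52) & 0x7ff) as i32 - 0x3ff;
+    let e0 = ilogb - 23;
+    // z * 2^(-e0), built directly as a power-of-two bit pattern
+    let mut z = z * f64::from_bits(((0x3ff - e0) as u64) << 52);
+    let mut x = [0.0; 3];
+    for xi in x.iter_mut() {
+        *xi = (z as i64) as f64; // floor(): z stays non-negative here
+        z = (z - *xi) * 16777216.0; // shift the next 24 bits up: * 2^24
+    }
+    (x, e0)
+}
+
+// The three 24-bit chunks carry 72 >= 53 bits, so summing them back
+// reconstructs the input exactly.
+#[test]
+fn split_24bit_roundtrip() {
+    let z = 12345.6789f64;
+    let (x, e0) = split_24bit(z);
+    let rebuilt = scalbn(x[0], e0) + scalbn(x[1], e0 - 24) + scalbn(x[2], e0 - 48);
+    assert_eq!(rebuilt, z);
+}
+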
+/// Return the low three bits of N with y = x - N*pi/2
+/// so that |y| < pi/2.
+///
+/// The method is to compute the integer (mod 8) and fraction parts of
+/// (2/pi)*x without doing the full multiplication. In general we
+/// skip the parts of the product that are known to be a huge integer
+/// (more accurately, = 0 mod 8). Thus the number of operations is
+/// independent of the exponent of the input.
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub(crate) fn rem_pio2_large(x: &[f64], y: &mut [f64], e0: i32, prec: usize) -> i32 {
+    let x1p24 = f64::from_bits(0x4170000000000000); // 0x1p24 === 2 ^ 24
+    let x1p_24 = f64::from_bits(0x3e70000000000000); // 0x1p_24 === 2 ^ (-24)
+
+    #[cfg(all(target_pointer_width = "64", feature = "checked"))]
+    assert!(e0 <= 16360);
+
+    let nx = x.len();
+
+    let mut fw: f64;
+    let mut n: i32;
+    let mut ih: i32;
+    let mut z: f64;
+    let mut f: [f64; 20] = [0.; 20];
+    let mut fq: [f64; 20] = [0.; 20];
+    let mut q: [f64; 20] = [0.; 20];
+    let mut iq: [i32; 20] = [0; 20];
+
+    /* initialize jk*/
+    let jk = INIT_JK[prec];
+    let jp = jk;
+
+    /* determine jx,jv,q0, note that 3>q0 */
+    let jx = nx - 1;
+    let mut jv = (e0 - 3) / 24;
+    if jv < 0 {
+        jv = 0;
+    }
+    let mut q0 = e0 - 24 * (jv + 1);
+    let jv = jv as usize;
+
+    /* set up f[0] to f[jx+jk] where f[jx+jk] = ipio2[jv+jk] */
+    let mut j = (jv - jx) as i32;
+    let m = jx + jk;
+    for i in 0..=m {
+        i!(f, i, =, if j < 0 {
+            0.
+        } else {
+            i!(IPIO2, j as usize) as f64
+        });
+        j += 1;
+    }
+
+    /* compute q[0],q[1],...q[jk] */
+    for i in 0..=jk {
+        fw = 0f64;
+        for j in 0..=jx {
+            fw += i!(x, j) * i!(f, jx + i - j);
+        }
+        i!(q, i, =, fw);
+    }
+
+    let mut jz = jk;
+
+    'recompute: loop {
+        /* distill q[] into iq[] in reverse order */
+        let mut i = 0i32;
+        z = i!(q, jz);
+        for j in (1..=jz).rev() {
+            fw = (x1p_24 * z) as i32 as f64;
+            i!(iq, i as usize, =, (z - x1p24 * fw) as i32);
+            z = i!(q, j - 1) + fw;
+            i += 1;
+        }
+
+        /* compute n */
+        z = scalbn(z, q0); /* actual value of z */
+        z -= 8.0 * floor(z * 0.125); /* trim off integer >= 8 */
+        n = z as i32;
+        z -= n as f64;
+        ih = 0;
+        if q0 > 0 {
+            /* need iq[jz-1] to determine n */
+            i = i!(iq, jz - 1) >> (24 - q0);
+            n += i;
+            i!(iq, jz - 1, -=, i << (24 - q0));
+            ih = i!(iq, jz - 1) >> (23 - q0);
+        } else if q0 == 0 {
+            ih = i!(iq, jz - 1) >> 23;
+        } else if z >= 0.5 {
+            ih = 2;
+        }
+
+        if ih > 0 {
+            /* q > 0.5 */
+            n += 1;
+            let mut carry = 0i32;
+            for i in 0..jz {
+                /* compute 1-q */
+                let j = i!(iq, i);
+                if carry == 0 {
+                    if j != 0 {
+                        carry = 1;
+                        i!(iq, i, =, 0x1000000 - j);
+                    }
+                } else {
+                    i!(iq, i, =, 0xffffff - j);
+                }
+            }
+            if q0 > 0 {
+                /* rare case: chance is 1 in 12 */
+                match q0 {
+                    1 => {
+                        i!(iq, jz - 1, &=, 0x7fffff);
+                    }
+                    2 => {
+                        i!(iq, jz - 1, &=, 0x3fffff);
+                    }
+                    _ => {}
+                }
+            }
+            if ih == 2 {
+                z = 1. - z;
+                if carry != 0 {
+                    z -= scalbn(1., q0);
+                }
+            }
+        }
+
+        /* check if recomputation is needed */
+        if z == 0. {
+            let mut j = 0;
+            for i in (jk..=jz - 1).rev() {
+                j |= i!(iq, i);
+            }
+            if j == 0 {
+                /* need recomputation */
+                let mut k = 1;
+                while i!(iq, jk - k, ==, 0) {
+                    k += 1; /* k = no. of terms needed */
+                }
+
+                for i in (jz + 1)..=(jz + k) {
+                    /* add q[jz+1] to q[jz+k] */
+                    i!(f, jx + i, =, i!(IPIO2, jv + i) as f64);
+                    fw = 0f64;
+                    for j in 0..=jx {
+                        fw += i!(x, j) * i!(f, jx + i - j);
+                    }
+                    i!(q, i, =, fw);
+                }
+                jz += k;
+                continue 'recompute;
+            }
+        }
+
+        break;
+    }
+
+    /* chop off zero terms */
+    if z == 0. {
+        jz -= 1;
+        q0 -= 24;
+        while i!(iq, jz) == 0 {
+            jz -= 1;
+            q0 -= 24;
+        }
+    } else {
+        /* break z into 24-bit chunks if necessary */
+        z = scalbn(z, -q0);
+        if z >= x1p24 {
+            fw = (x1p_24 * z) as i32 as f64;
+            i!(iq, jz, =, (z - x1p24 * fw) as i32);
+            jz += 1;
+            q0 += 24;
+            i!(iq, jz, =, fw as i32);
+        } else {
+            i!(iq, jz, =, z as i32);
+        }
+    }
+
+    /* convert integer "bit" chunk to floating-point value */
+    fw = scalbn(1., q0);
+    for i in (0..=jz).rev() {
+        i!(q, i, =, fw * (i!(iq, i) as f64));
+        fw *= x1p_24;
+    }
+
+    /* compute PIo2[0,...,jp]*q[jz,...,0] */
+    for i in (0..=jz).rev() {
+        fw = 0f64;
+        let mut k = 0;
+        while (k <= jp) && (k <= jz - i) {
+            fw += i!(PIO2, k) * i!(q, i + k);
+            k += 1;
+        }
+        i!(fq, jz - i, =, fw);
+    }
+
+    /* compress fq[] into y[] */
+    match prec {
+        0 => {
+            fw = 0f64;
+            for i in (0..=jz).rev() {
+                fw += i!(fq, i);
+            }
+            i!(y, 0, =, if ih == 0 { fw } else { -fw });
+        }
+        1 | 2 => {
+            fw = 0f64;
+            for i in (0..=jz).rev() {
+                fw += i!(fq, i);
+            }
+            // TODO: drop excess precision here once double_t is used
+            fw = fw as f64;
+            i!(y, 0, =, if ih == 0 { fw } else { -fw });
+            fw = i!(fq, 0) - fw;
+            for i in 1..=jz {
+                fw += i!(fq, i);
+            }
+            i!(y, 1, =, if ih == 0 { fw } else { -fw });
+        }
+        3 => {
+            /* painful */
+            for i in (1..=jz).rev() {
+                fw = i!(fq, i - 1) + i!(fq, i);
+                i!(fq, i, +=, i!(fq, i - 1) - fw);
+                i!(fq, i - 1, =, fw);
+            }
+            for i in (2..=jz).rev() {
+                fw = i!(fq, i - 1) + i!(fq, i);
+                i!(fq, i, +=, i!(fq, i - 1) - fw);
+                i!(fq, i - 1, =, fw);
+            }
+            fw = 0f64;
+            for i in (2..=jz).rev() {
+                fw += i!(fq, i);
+            }
+            if ih == 0 {
+                i!(y, 0, =, i!(fq, 0));
+                i!(y, 1, =, i!(fq, 1));
+                i!(y, 2, =, fw);
+            } else {
+                i!(y, 0, =, -i!(fq, 0));
+                i!(y, 1, =, -i!(fq, 1));
+                i!(y, 2, =, -fw);
+            }
+        }
+        #[cfg(feature = "checked")]
+        _ => unreachable!(),
+        #[cfg(not(feature = "checked"))]
+        _ => {}
+    }
+    n & 7
+}
+
+
\ No newline at end of file
diff --git a/src/libm/math/rem_pio2f.rs.html b/src/libm/math/rem_pio2f.rs.html
new file mode 100644
index 000000000..aa8d3fc46
--- /dev/null
+++ b/src/libm/math/rem_pio2f.rs.html
@@ -0,0 +1,129 @@
+rem_pio2f.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/e_rem_pio2f.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ * Debugged and optimized by Bruce D. Evans.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::rem_pio2_large;
+
+use core::f64;
+
+const TOINT: f64 = 1.5 / f64::EPSILON;
+
+/// 53 bits of 2/pi
+const INV_PIO2: f64 = 6.36619772367581382433e-01; /* 0x3FE45F30, 0x6DC9C883 */
+/// first 25 bits of pi/2
+const PIO2_1: f64 = 1.57079631090164184570e+00; /* 0x3FF921FB, 0x50000000 */
+/// pi/2 - pio2_1
+const PIO2_1T: f64 = 1.58932547735281966916e-08; /* 0x3E5110b4, 0x611A6263 */
+
+/// Return the remainder of x rem pi/2 in the second tuple element
+///
+/// use double precision for everything except passing x
+/// use rem_pio2_large() for large x
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub(crate) fn rem_pio2f(x: f32) -> (i32, f64) {
+    let x64 = x as f64;
+
+    let mut tx: [f64; 1] = [0.];
+    let mut ty: [f64; 1] = [0.];
+
+    let ix = x.to_bits() & 0x7fffffff;
+    /* 25+53 bit pi is good enough for medium size */
+    if ix < 0x4dc90fdb {
+        /* |x| ~< 2^28*(pi/2), medium size */
+        /* Use a specialized rint() to get fn.  Assume round-to-nearest. */
+        let f_n = x64 * INV_PIO2 + TOINT - TOINT;
+        return (f_n as i32, x64 - f_n * PIO2_1 - f_n * PIO2_1T);
+    }
+    if ix >= 0x7f800000 {
+        /* x is inf or NaN */
+        return (0, x64 - x64);
+    }
+    /* scale x into [2^23, 2^24-1] */
+    let sign = (x.to_bits() >> 31) != 0;
+    let e0 = ((ix >> 23) - (0x7f + 23)) as i32; /* e0 = ilogb(|x|)-23, positive */
+    tx[0] = f32::from_bits(ix - (e0 << 23) as u32) as f64;
+    let n = rem_pio2_large(&tx, &mut ty, e0, 0);
+    if sign {
+        return (-n, -ty[0]);
+    }
+    (n, ty[0])
+}
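+
+// Hedged usage sketch (added for exposition): the input is recoverable
+// from the reduced form as n*(pi/2) + y to within a few ulps, using the
+// double approximation of pi/2 for the reconstruction.
+#[test]
+fn rem_pio2f_reconstructs() {
+    let x = 12.3f32;
+    let (n, y) = rem_pio2f(x);
+    let rebuilt = (n as f64) * core::f64::consts::FRAC_PI_2 + y;
+    assert!((rebuilt - x as f64).abs() < 1e-6);
+}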
+
+
\ No newline at end of file
diff --git a/src/libm/math/remquo.rs.html b/src/libm/math/remquo.rs.html
new file mode 100644
index 000000000..94e767f7d
--- /dev/null
+++ b/src/libm/math/remquo.rs.html
@@ -0,0 +1,197 @@
+remquo.rs.html -- source
+pub fn remquo(mut x: f64, mut y: f64) -> (f64, i32) {
+    let ux: u64 = x.to_bits();
+    let mut uy: u64 = y.to_bits();
+    let mut ex = ((ux >> 52) & 0x7ff) as i32;
+    let mut ey = ((uy >> 52) & 0x7ff) as i32;
+    let sx = (ux >> 63) != 0;
+    let sy = (uy >> 63) != 0;
+    let mut q: u32;
+    let mut i: u64;
+    let mut uxi: u64 = ux;
+
+    if (uy << 1) == 0 || y.is_nan() || ex == 0x7ff {
+        return ((x * y) / (x * y), 0);
+    }
+    if (ux << 1) == 0 {
+        return (x, 0);
+    }
+
+    /* normalize x and y */
+    if ex == 0 {
+        i = uxi << 12;
+        while (i >> 63) == 0 {
+            ex -= 1;
+            i <<= 1;
+        }
+        uxi <<= -ex + 1;
+    } else {
+        uxi &= (!0) >> 12;
+        uxi |= 1 << 52;
+    }
+    if ey == 0 {
+        i = uy << 12;
+        while (i >> 63) == 0 {
+            ey -= 1;
+            i <<= 1;
+        }
+        uy <<= -ey + 1;
+    } else {
+        uy &= (!0) >> 12;
+        uy |= 1 << 52;
+    }
+
+    q = 0;
+
+    if ex + 1 != ey {
+        if ex < ey {
+            return (x, 0);
+        }
+        /* x mod y */
+        while ex > ey {
+            i = uxi.wrapping_sub(uy);
+            if (i >> 63) == 0 {
+                uxi = i;
+                q += 1;
+            }
+            uxi <<= 1;
+            q <<= 1;
+            ex -= 1;
+        }
+        i = uxi.wrapping_sub(uy);
+        if (i >> 63) == 0 {
+            uxi = i;
+            q += 1;
+        }
+        if uxi == 0 {
+            ex = -60;
+        } else {
+            while (uxi >> 52) == 0 {
+                uxi <<= 1;
+                ex -= 1;
+            }
+        }
+    }
+
+    /* scale result and decide between |x| and |x|-|y| */
+    if ex > 0 {
+        uxi -= 1 << 52;
+        uxi |= (ex as u64) << 52;
+    } else {
+        uxi >>= -ex + 1;
+    }
+    x = f64::from_bits(uxi);
+    if sy {
+        y = -y;
+    }
+    if ex == ey || (ex + 1 == ey && (2.0 * x > y || (2.0 * x == y && (q % 2) != 0))) {
+        x -= y;
+        q += 1;
+    }
+    q &= 0x7fffffff;
+    let quo = if sx ^ sy { -(q as i32) } else { q as i32 };
+    if sx {
+        (-x, quo)
+    } else {
+        (x, quo)
+    }
+}
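+
+// Worked example (added for exposition): remquo returns the IEEE
+// remainder plus the low bits of the rounded quotient. 7/2 = 3.5 rounds
+// to the even quotient 4, so the remainder is 7 - 4*2 = -1.
+#[test]
+fn remquo_7_2() {
+    let (r, q) = remquo(7.0, 2.0);
+    assert_eq!(r, -1.0);
+    assert_eq!(q, 4);
+}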
+
+
\ No newline at end of file
diff --git a/src/libm/math/remquof.rs.html b/src/libm/math/remquof.rs.html
new file mode 100644
index 000000000..a01429863
--- /dev/null
+++ b/src/libm/math/remquof.rs.html
@@ -0,0 +1,195 @@
+remquof.rs.html -- source
+pub fn remquof(mut x: f32, mut y: f32) -> (f32, i32) {
+    let ux: u32 = x.to_bits();
+    let mut uy: u32 = y.to_bits();
+    let mut ex = ((ux >> 23) & 0xff) as i32;
+    let mut ey = ((uy >> 23) & 0xff) as i32;
+    let sx = (ux >> 31) != 0;
+    let sy = (uy >> 31) != 0;
+    let mut q: u32;
+    let mut i: u32;
+    let mut uxi: u32 = ux;
+
+    if (uy << 1) == 0 || y.is_nan() || ex == 0xff {
+        return ((x * y) / (x * y), 0);
+    }
+    if (ux << 1) == 0 {
+        return (x, 0);
+    }
+
+    /* normalize x and y */
+    if ex == 0 {
+        i = uxi << 9;
+        while (i >> 31) == 0 {
+            ex -= 1;
+            i <<= 1;
+        }
+        uxi <<= -ex + 1;
+    } else {
+        uxi &= (!0) >> 9;
+        uxi |= 1 << 23;
+    }
+    if ey == 0 {
+        i = uy << 9;
+        while (i >> 31) == 0 {
+            ey -= 1;
+            i <<= 1;
+        }
+        uy <<= -ey + 1;
+    } else {
+        uy &= (!0) >> 9;
+        uy |= 1 << 23;
+    }
+
+    q = 0;
+    if ex + 1 != ey {
+        if ex < ey {
+            return (x, 0);
+        }
+        /* x mod y */
+        while ex > ey {
+            i = uxi.wrapping_sub(uy);
+            if (i >> 31) == 0 {
+                uxi = i;
+                q += 1;
+            }
+            uxi <<= 1;
+            q <<= 1;
+            ex -= 1;
+        }
+        i = uxi.wrapping_sub(uy);
+        if (i >> 31) == 0 {
+            uxi = i;
+            q += 1;
+        }
+        if uxi == 0 {
+            ex = -30;
+        } else {
+            while (uxi >> 23) == 0 {
+                uxi <<= 1;
+                ex -= 1;
+            }
+        }
+    }
+
+    /* scale result and decide between |x| and |x|-|y| */
+    if ex > 0 {
+        uxi -= 1 << 23;
+        uxi |= (ex as u32) << 23;
+    } else {
+        uxi >>= -ex + 1;
+    }
+    x = f32::from_bits(uxi);
+    if sy {
+        y = -y;
+    }
+    if ex == ey || (ex + 1 == ey && (2.0 * x > y || (2.0 * x == y && (q % 2) != 0))) {
+        x -= y;
+        q += 1;
+    }
+    q &= 0x7fffffff;
+    let quo = if sx ^ sy { -(q as i32) } else { q as i32 };
+    if sx {
+        (-x, quo)
+    } else {
+        (x, quo)
+    }
+}
+
+
\ No newline at end of file
diff --git a/src/libm/math/round.rs.html b/src/libm/math/round.rs.html
new file mode 100644
index 000000000..9fd15cb08
--- /dev/null
+++ b/src/libm/math/round.rs.html
@@ -0,0 +1,77 @@
+round.rs.html -- source
+use core::f64;
+
+const TOINT: f64 = 1.0 / f64::EPSILON;
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn round(mut x: f64) -> f64 {
+    let (f, i) = (x, x.to_bits());
+    let e: u64 = i >> 52 & 0x7ff;
+    let mut y: f64;
+
+    if e >= 0x3ff + 52 {
+        return x;
+    }
+    if i >> 63 != 0 {
+        x = -x;
+    }
+    if e < 0x3ff - 1 {
+        // raise inexact if x!=0
+        force_eval!(x + TOINT);
+        return 0.0 * f;
+    }
+    y = x + TOINT - TOINT - x;
+    if y > 0.5 {
+        y = y + x - 1.0;
+    } else if y <= -0.5 {
+        y = y + x + 1.0;
+    } else {
+        y = y + x;
+    }
+
+    if i >> 63 != 0 {
+        -y
+    } else {
+        y
+    }
+}
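+
+// Hedged examples (added for exposition): unlike the rint-style TOINT
+// trick it builds on, this routine rounds halfway cases away from zero.
+#[test]
+fn round_halfway_cases() {
+    assert_eq!(round(0.5), 1.0);
+    assert_eq!(round(-0.5), -1.0);
+    assert_eq!(round(2.5), 3.0);
+}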
+
+
\ No newline at end of file
diff --git a/src/libm/math/roundf.rs.html b/src/libm/math/roundf.rs.html
new file mode 100644
index 000000000..25e8dd0ce
--- /dev/null
+++ b/src/libm/math/roundf.rs.html
@@ -0,0 +1,73 @@
+roundf.rs.html -- source
+use core::f32;
+
+const TOINT: f32 = 1.0 / f32::EPSILON;
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn roundf(mut x: f32) -> f32 {
+    let i = x.to_bits();
+    let e: u32 = i >> 23 & 0xff;
+    let mut y: f32;
+
+    if e >= 0x7f + 23 {
+        return x;
+    }
+    if i >> 31 != 0 {
+        x = -x;
+    }
+    if e < 0x7f - 1 {
+        force_eval!(x + TOINT);
+        return 0.0 * x;
+    }
+    y = x + TOINT - TOINT - x;
+    if y > 0.5f32 {
+        y = y + x - 1.0;
+    } else if y <= -0.5f32 {
+        y = y + x + 1.0;
+    } else {
+        y = y + x;
+    }
+    if i >> 31 != 0 {
+        -y
+    } else {
+        y
+    }
+}
+
+
\ No newline at end of file
diff --git a/src/libm/math/scalbn.rs.html b/src/libm/math/scalbn.rs.html
new file mode 100644
index 000000000..0c5b27281
--- /dev/null
+++ b/src/libm/math/scalbn.rs.html
@@ -0,0 +1,71 @@
+scalbn.rs.html -- source
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn scalbn(x: f64, mut n: i32) -> f64 {
+    let x1p1023 = f64::from_bits(0x7fe0000000000000); // 0x1p1023 === 2 ^ 1023
+    let x1p53 = f64::from_bits(0x4340000000000000); // 0x1p53 === 2 ^ 53
+    let x1p_1022 = f64::from_bits(0x0010000000000000); // 0x1p-1022 === 2 ^ (-1022)
+
+    let mut y = x;
+
+    if n > 1023 {
+        y *= x1p1023;
+        n -= 1023;
+        if n > 1023 {
+            y *= x1p1023;
+            n -= 1023;
+            if n > 1023 {
+                n = 1023;
+            }
+        }
+    } else if n < -1022 {
+        /* make sure final n < -53 to avoid double
+        rounding in the subnormal range */
+        y *= x1p_1022 * x1p53;
+        n += 1022 - 53;
+        if n < -1022 {
+            y *= x1p_1022 * x1p53;
+            n += 1022 - 53;
+            if n < -1022 {
+                n = -1022;
+            }
+        }
+    }
+    y * f64::from_bits(((0x3ff + n) as u64) << 52)
+}
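+
+// Hedged examples (added for exposition): scaling by 2^n is exact until
+// the exponent range runs out; the staged prescaling above only clamps
+// truly enormous |n|.
+#[test]
+fn scalbn_powers_of_two() {
+    assert_eq!(scalbn(1.0, 10), 1024.0);
+    assert_eq!(scalbn(1.5, -1), 0.75);
+    assert_eq!(scalbn(1.0, 2000), core::f64::INFINITY);
+}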
+
+
\ No newline at end of file
diff --git a/src/libm/math/scalbnf.rs.html b/src/libm/math/scalbnf.rs.html
new file mode 100644
index 000000000..e9230f035
--- /dev/null
+++ b/src/libm/math/scalbnf.rs.html
@@ -0,0 +1,63 @@
+scalbnf.rs.html -- source
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn scalbnf(mut x: f32, mut n: i32) -> f32 {
+    let x1p127 = f32::from_bits(0x7f000000); // 0x1p127f === 2 ^ 127
+    let x1p_126 = f32::from_bits(0x800000); // 0x1p-126f === 2 ^ -126
+    let x1p24 = f32::from_bits(0x4b800000); // 0x1p24f === 2 ^ 24
+
+    if n > 127 {
+        x *= x1p127;
+        n -= 127;
+        if n > 127 {
+            x *= x1p127;
+            n -= 127;
+            if n > 127 {
+                n = 127;
+            }
+        }
+    } else if n < -126 {
+        x *= x1p_126 * x1p24;
+        n += 126 - 24;
+        if n < -126 {
+            x *= x1p_126 * x1p24;
+            n += 126 - 24;
+            if n < -126 {
+                n = -126;
+            }
+        }
+    }
+    x * f32::from_bits(((0x7f + n) as u32) << 23)
+}
+
+
\ No newline at end of file
diff --git a/src/libm/math/sin.rs.html b/src/libm/math/sin.rs.html
new file mode 100644
index 000000000..2e177759a
--- /dev/null
+++ b/src/libm/math/sin.rs.html
@@ -0,0 +1,175 @@
+sin.rs.html -- source
+// origin: FreeBSD /usr/src/lib/msun/src/s_sin.c
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+
+use super::{k_cos, k_sin, rem_pio2};
+
+// sin(x)
+// Return sine function of x.
+//
+// kernel function:
+//      k_sin            ... sine function on [-pi/4,pi/4]
+//      k_cos            ... cosine function on [-pi/4,pi/4]
+//      rem_pio2         ... argument reduction routine
+//
+// Method.
+//      Let S,C and T denote the sin, cos and tan respectively on
+//      [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+//      in [-pi/4 , +pi/4], and let n = k mod 4.
+//      We have
+//
+//          n        sin(x)      cos(x)        tan(x)
+//     ----------------------------------------------------------
+//          0          S           C             T
+//          1          C          -S            -1/T
+//          2         -S          -C             T
+//          3         -C           S            -1/T
+//     ----------------------------------------------------------
+//
+// Special cases:
+//      Let trig be any of sin, cos, or tan.
+//      trig(+-INF)  is NaN, with signals;
+//      trig(NaN)    is that NaN;
+//
+// Accuracy:
+//      TRIG(x) returns trig(x) nearly rounded
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn sin(x: f64) -> f64 {
+    let x1p120 = f64::from_bits(0x4770000000000000); // 0x1p120f === 2 ^ 120
+
+    /* High word of x. */
+    let ix = (f64::to_bits(x) >> 32) as u32 & 0x7fffffff;
+
+    /* |x| ~< pi/4 */
+    if ix <= 0x3fe921fb {
+        if ix < 0x3e500000 {
+            /* |x| < 2**-26 */
+            /* raise inexact if x != 0 and underflow if subnormal*/
+            if ix < 0x00100000 {
+                force_eval!(x / x1p120);
+            } else {
+                force_eval!(x + x1p120);
+            }
+            return x;
+        }
+        return k_sin(x, 0.0, 0);
+    }
+
+    /* sin(Inf or NaN) is NaN */
+    if ix >= 0x7ff00000 {
+        return x - x;
+    }
+
+    /* argument reduction needed */
+    let (n, y0, y1) = rem_pio2(x);
+    match n & 3 {
+        0 => k_sin(y0, y1, 1),
+        1 => k_cos(y0, y1),
+        2 => -k_sin(y0, y1, 1),
+        _ => -k_cos(y0, y1),
+    }
+}
+
+#[test]
+fn test_near_pi() {
+    let x = f64::from_bits(0x400921fb000FD5DD); // 3.141592026217707
+    let sx = f64::from_bits(0x3ea50d15ced1a4a2); // 6.273720864039205e-7
+    assert_eq!(sin(x), sx);
+}
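+
+// Additional hedged spot checks (added for exposition): arguments past
+// pi/4 exercise rem_pio2 and the quadrant table above. For the double
+// nearest pi the true sine is ~1.22e-16, so the result must be tiny but
+// need not be zero.
+#[test]
+fn test_reduction_spot_checks() {
+    use core::f64::consts::{FRAC_PI_2, PI};
+    assert!(sin(PI).abs() < 1e-15);
+    assert!((sin(FRAC_PI_2) - 1.0).abs() < 1e-15);
+}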
+
+
\ No newline at end of file
diff --git a/src/libm/math/sincos.rs.html b/src/libm/math/sincos.rs.html
new file mode 100644
index 000000000..5c92d4c20
--- /dev/null
+++ b/src/libm/math/sincos.rs.html
@@ -0,0 +1,121 @@
+sincos.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/s_sin.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::{get_high_word, k_cos, k_sin, rem_pio2};
+
+pub fn sincos(x: f64) -> (f64, f64) {
+    let s: f64;
+    let c: f64;
+    let mut ix: u32;
+
+    ix = get_high_word(x);
+    ix &= 0x7fffffff;
+
+    /* |x| ~< pi/4 */
+    if ix <= 0x3fe921fb {
+        /* if |x| < 2**-27 * sqrt(2) */
+        if ix < 0x3e46a09e {
+            /* raise inexact if x!=0 and underflow if subnormal */
+            let x1p120 = f64::from_bits(0x4770000000000000); // 0x1p120 == 2^120
+            if ix < 0x00100000 {
+                force_eval!(x / x1p120);
+            } else {
+                force_eval!(x + x1p120);
+            }
+            return (x, 1.0);
+        }
+        return (k_sin(x, 0.0, 0), k_cos(x, 0.0));
+    }
+
+    /* sincos(Inf or NaN) is NaN */
+    if ix >= 0x7ff00000 {
+        let rv = x - x;
+        return (rv, rv);
+    }
+
+    /* argument reduction needed */
+    let (n, y0, y1) = rem_pio2(x);
+    s = k_sin(y0, y1, 1);
+    c = k_cos(y0, y1);
+    match n & 3 {
+        0 => (s, c),
+        1 => (c, -s),
+        2 => (-s, -c),
+        3 => (-c, s),
+        #[cfg(feature = "checked")]
+        _ => unreachable!(),
+        #[cfg(not(feature = "checked"))]
+        _ => (0.0, 1.0),
+    }
+}
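+
+// Hedged consistency sketch (added for exposition): sincos is expected to
+// agree bit-for-bit with the separate sin and cos entry points of this
+// crate, since all three share the same reduction and kernels.
+#[test]
+fn sincos_matches_sin_cos() {
+    use super::{cos, sin};
+    for &x in &[0.5f64, 3.7, -2.0, 120.25] {
+        let (s, c) = sincos(x);
+        assert_eq!(s, sin(x));
+        assert_eq!(c, cos(x));
+    }
+}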
+
+
\ No newline at end of file
diff --git a/src/libm/math/sincosf.rs.html b/src/libm/math/sincosf.rs.html
new file mode 100644
index 000000000..fcd3edf43
--- /dev/null
+++ b/src/libm/math/sincosf.rs.html
@@ -0,0 +1,249 @@
+sincosf.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/s_sinf.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ * Optimized by Bruce D. Evans.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::{k_cosf, k_sinf, rem_pio2f};
+
+/* Small multiples of pi/2, stored here as single-precision constants. */
+const PI_2: f32 = 0.5 * 3.1415926535897931160E+00;
+const S1PIO2: f32 = 1.0 * PI_2; /* 0x3FF921FB, 0x54442D18 */
+const S2PIO2: f32 = 2.0 * PI_2; /* 0x400921FB, 0x54442D18 */
+const S3PIO2: f32 = 3.0 * PI_2; /* 0x4012D97C, 0x7F3321D2 */
+const S4PIO2: f32 = 4.0 * PI_2; /* 0x401921FB, 0x54442D18 */
+
+pub fn sincosf(x: f32) -> (f32, f32) {
+    let s: f32;
+    let c: f32;
+    let mut ix: u32;
+    let sign: bool;
+
+    ix = x.to_bits();
+    sign = (ix >> 31) != 0;
+    ix &= 0x7fffffff;
+
+    /* |x| ~<= pi/4 */
+    if ix <= 0x3f490fda {
+        /* |x| < 2**-12 */
+        if ix < 0x39800000 {
+            /* raise inexact if x!=0 and underflow if subnormal */
+
+            let x1p120 = f32::from_bits(0x7b800000); // 0x1p120 == 2^120
+            if ix < 0x00100000 {
+                force_eval!(x / x1p120);
+            } else {
+                force_eval!(x + x1p120);
+            }
+            return (x, 1.0);
+        }
+        return (k_sinf(x as f64), k_cosf(x as f64));
+    }
+
+    /* |x| ~<= 5*pi/4 */
+    if ix <= 0x407b53d1 {
+        if ix <= 0x4016cbe3 {
+            /* |x| ~<= 3pi/4 */
+            if sign {
+                s = -k_cosf((x + S1PIO2) as f64);
+                c = k_sinf((x + S1PIO2) as f64);
+            } else {
+                s = k_cosf((S1PIO2 - x) as f64);
+                c = k_sinf((S1PIO2 - x) as f64);
+            }
+        }
+        /* -sin(x+c) is not correct if x+c could be 0: -0 vs +0 */
+        else {
+            if sign {
+                s = k_sinf((x + S2PIO2) as f64);
+                c = k_cosf((x + S2PIO2) as f64);
+            } else {
+                s = k_sinf((x - S2PIO2) as f64);
+                c = k_cosf((x - S2PIO2) as f64);
+            }
+        }
+
+        return (s, c);
+    }
+
+    /* |x| ~<= 9*pi/4 */
+    if ix <= 0x40e231d5 {
+        if ix <= 0x40afeddf {
+            /* |x| ~<= 7*pi/4 */
+            if sign {
+                s = k_cosf((x + S3PIO2) as f64);
+                c = -k_sinf((x + S3PIO2) as f64);
+            } else {
+                s = -k_cosf((x - S3PIO2) as f64);
+                c = k_sinf((x - S3PIO2) as f64);
+            }
+        } else {
+            if sign {
+                s = k_cosf((x + S4PIO2) as f64);
+                c = k_sinf((x + S4PIO2) as f64);
+            } else {
+                s = k_cosf((x - S4PIO2) as f64);
+                c = k_sinf((x - S4PIO2) as f64);
+            }
+        }
+
+        return (s, c);
+    }
+
+    /* sin(Inf or NaN) is NaN */
+    if ix >= 0x7f800000 {
+        let rv = x - x;
+        return (rv, rv);
+    }
+
+    /* general argument reduction needed */
+    let (n, y) = rem_pio2f(x);
+    s = k_sinf(y);
+    c = k_cosf(y);
+    match n & 3 {
+        0 => (s, c),
+        1 => (c, -s),
+        2 => (-s, -c),
+        3 => (-c, s),
+        #[cfg(feature = "checked")]
+        _ => unreachable!(),
+        #[cfg(not(feature = "checked"))]
+        _ => (0.0, 1.0),
+    }
+}
+
+
\ No newline at end of file
diff --git a/src/libm/math/sinf.rs.html b/src/libm/math/sinf.rs.html
new file mode 100644
index 000000000..9ad3bcd2d
--- /dev/null
+++ b/src/libm/math/sinf.rs.html
@@ -0,0 +1,191 @@
+sinf.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/s_sinf.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ * Optimized by Bruce D. Evans.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::{k_cosf, k_sinf, rem_pio2f};
+
+use core::f64::consts::FRAC_PI_2;
+
+/* Small multiples of pi/2 rounded to double precision. */
+const S1_PIO2: f64 = 1. * FRAC_PI_2; /* 0x3FF921FB, 0x54442D18 */
+const S2_PIO2: f64 = 2. * FRAC_PI_2; /* 0x400921FB, 0x54442D18 */
+const S3_PIO2: f64 = 3. * FRAC_PI_2; /* 0x4012D97C, 0x7F3321D2 */
+const S4_PIO2: f64 = 4. * FRAC_PI_2; /* 0x401921FB, 0x54442D18 */
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn sinf(x: f32) -> f32 {
+    let x64 = x as f64;
+
+    let x1p120 = f32::from_bits(0x7b800000); // 0x1p120f === 2 ^ 120
+
+    let mut ix = x.to_bits();
+    let sign = (ix >> 31) != 0;
+    ix &= 0x7fffffff;
+
+    if ix <= 0x3f490fda {
+        /* |x| ~<= pi/4 */
+        if ix < 0x39800000 {
+            /* |x| < 2**-12 */
+            /* raise inexact if x!=0 and underflow if subnormal */
+            force_eval!(if ix < 0x00800000 {
+                x / x1p120
+            } else {
+                x + x1p120
+            });
+            return x;
+        }
+        return k_sinf(x64);
+    }
+    if ix <= 0x407b53d1 {
+        /* |x| ~<= 5*pi/4 */
+        if ix <= 0x4016cbe3 {
+            /* |x| ~<= 3pi/4 */
+            if sign {
+                return -k_cosf(x64 + S1_PIO2);
+            } else {
+                return k_cosf(x64 - S1_PIO2);
+            }
+        }
+        return k_sinf(if sign {
+            -(x64 + S2_PIO2)
+        } else {
+            -(x64 - S2_PIO2)
+        });
+    }
+    if ix <= 0x40e231d5 {
+        /* |x| ~<= 9*pi/4 */
+        if ix <= 0x40afeddf {
+            /* |x| ~<= 7*pi/4 */
+            if sign {
+                return k_cosf(x64 + S3_PIO2);
+            } else {
+                return -k_cosf(x64 - S3_PIO2);
+            }
+        }
+        return k_sinf(if sign { x64 + S4_PIO2 } else { x64 - S4_PIO2 });
+    }
+
+    /* sin(Inf or NaN) is NaN */
+    if ix >= 0x7f800000 {
+        return x - x;
+    }
+
+    /* general argument reduction needed */
+    let (n, y) = rem_pio2f(x);
+    match n & 3 {
+        0 => k_sinf(y),
+        1 => k_cosf(y),
+        2 => k_sinf(-y),
+        _ => -k_cosf(y),
+    }
+}
+
+
\ No newline at end of file
diff --git a/src/libm/math/sinh.rs.html b/src/libm/math/sinh.rs.html
new file mode 100644
index 000000000..4395a5b92
--- /dev/null
+++ b/src/libm/math/sinh.rs.html
@@ -0,0 +1,103 @@
+sinh.rs.html -- source
+use super::{expm1, expo2};
+
+// sinh(x) = (exp(x) - 1/exp(x))/2
+//         = (exp(x)-1 + (exp(x)-1)/exp(x))/2
+//         = x + x^3/6 + o(x^5)
+//
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn sinh(x: f64) -> f64 {
+    // union {double f; uint64_t i;} u = {.f = x};
+    // uint32_t w;
+    // double t, h, absx;
+
+    let mut uf: f64 = x;
+    let mut ui: u64 = f64::to_bits(uf);
+    let w: u32;
+    let t: f64;
+    let mut h: f64;
+    let absx: f64;
+
+    h = 0.5;
+    if ui >> 63 != 0 {
+        h = -h;
+    }
+    /* |x| */
+    ui &= !1 / 2;
+    uf = f64::from_bits(ui);
+    absx = uf;
+    w = (ui >> 32) as u32;
+
+    /* |x| < log(DBL_MAX) */
+    if w < 0x40862e42 {
+        t = expm1(absx);
+        if w < 0x3ff00000 {
+            if w < 0x3ff00000 - (26 << 20) {
+                /* note: inexact and underflow are raised by expm1 */
+                /* note: this branch avoids spurious underflow */
+                return x;
+            }
+            return h * (2.0 * t - t * t / (t + 1.0));
+        }
+        /* note: |x|>log(0x1p26)+eps could be just h*exp(x) */
+        return h * (t + t / (t + 1.0));
+    }
+
+    /* |x| > log(DBL_MAX) or nan */
+    /* note: the result is stored to handle overflow */
+    t = 2.0 * h * expo2(absx);
+    t
+}
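+
+// Hedged numeric check (added for exposition): for moderate x the result
+// should track the defining formula (exp(x) - exp(-x)) / 2 to within a
+// few ulps; `exp` is this crate's exponential.
+#[test]
+fn sinh_matches_definition() {
+    use super::exp;
+    let x = 1.0;
+    let reference = (exp(x) - exp(-x)) / 2.0;
+    assert!((sinh(x) - reference).abs() < 1e-15);
+}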
+
+
\ No newline at end of file
diff --git a/src/libm/math/sinhf.rs.html b/src/libm/math/sinhf.rs.html
new file mode 100644
index 000000000..f0b89d5d0
--- /dev/null
+++ b/src/libm/math/sinhf.rs.html
@@ -0,0 +1,65 @@
+sinhf.rs.html -- source
+use super::expm1f;
+use super::k_expo2f;
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn sinhf(x: f32) -> f32 {
+    let mut h = 0.5f32;
+    let mut ix = x.to_bits();
+    if (ix >> 31) != 0 {
+        h = -h;
+    }
+    /* |x| */
+    ix &= 0x7fffffff;
+    let absx = f32::from_bits(ix);
+    let w = ix;
+
+    /* |x| < log(FLT_MAX) */
+    if w < 0x42b17217 {
+        let t = expm1f(absx);
+        if w < 0x3f800000 {
+            if w < (0x3f800000 - (12 << 23)) {
+                return x;
+            }
+            return h * (2. * t - t * t / (t + 1.));
+        }
+        return h * (t + t / (t + 1.));
+    }
+
+    /* |x| > logf(FLT_MAX) or nan */
+    2. * h * k_expo2f(absx)
+}
+
+
\ No newline at end of file
diff --git a/src/libm/math/sqrt.rs.html b/src/libm/math/sqrt.rs.html
new file mode 100644
index 000000000..49ae63fb6
--- /dev/null
+++ b/src/libm/math/sqrt.rs.html
@@ -0,0 +1,449 @@
+sqrt.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/e_sqrt.c */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/* sqrt(x)
+ * Return correctly rounded sqrt.
+ *           ------------------------------------------
+ *           |  Use the hardware sqrt if you have one |
+ *           ------------------------------------------
+ * Method:
+ *   Bit by bit method using integer arithmetic. (Slow, but portable)
+ *   1. Normalization
+ *      Scale x to y in [1,4) with even powers of 2:
+ *      find an integer k such that  1 <= (y=x*2^(2k)) < 4, then
+ *              sqrt(x) = 2^k * sqrt(y)
+ *   2. Bit by bit computation
+ *      Let q  = sqrt(y) truncated to i bit after binary point (q = 1),
+ *           i                                                   0
+ *                                     i+1         2
+ *          s  = 2*q , and      y  =  2   * ( y - q  ).         (1)
+ *           i      i            i                 i
+ *
+ *      To compute q    from q , one checks whether
+ *                  i+1       i
+ *
+ *                            -(i+1) 2
+ *                      (q + 2      ) <= y.                     (2)
+ *                        i
+ *                                                            -(i+1)
+ *      If (2) is false, then q   = q ; otherwise q   = q  + 2      .
+ *                             i+1   i             i+1   i
+ *
+ *      With some algebraic manipulation, it is not difficult to see
+ *      that (2) is equivalent to
+ *                             -(i+1)
+ *                      s  +  2       <= y                      (3)
+ *                       i                i
+ *
+ *      The advantage of (3) is that s  and y  can be computed by
+ *                                    i      i
+ *      the following recurrence formula:
+ *          if (3) is false
+ *
+ *          s     =  s  ,       y    = y   ;                    (4)
+ *           i+1      i          i+1    i
+ *
+ *          otherwise,
+ *                         -i                     -(i+1)
+ *          s     =  s  + 2  ,  y    = y  -  s  - 2             (5)
+ *           i+1      i          i+1    i     i
+ *
+ *      One may easily use induction to prove (4) and (5).
+ *      Note. Since the left hand side of (3) contains only i+2 bits,
+ *            it is not necessary to do a full (53-bit) comparison
+ *            in (3).
+ *   3. Final rounding
+ *      After generating the 53 bits result, we compute one more bit.
+ *      Together with the remainder, we can decide whether the
+ *      result is exact, bigger than 1/2ulp, or less than 1/2ulp
+ *      (it will never be exactly 1/2 ulp).
+ *      The rounding mode can be detected by checking whether
+ *      huge + tiny is equal to huge, and whether huge - tiny is
+ *      equal to huge for some floating point number "huge" and "tiny".
+ *
+ * Special cases:
+ *      sqrt(+-0) = +-0         ... exact
+ *      sqrt(inf) = inf
+ *      sqrt(-ve) = NaN         ... with invalid signal
+ *      sqrt(NaN) = NaN         ... with invalid signal for signaling NaN
+ */
+
+use core::f64;
+use core::num::Wrapping;
+
+const TINY: f64 = 1.0e-300;
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn sqrt(x: f64) -> f64 {
+    // On wasm32 we know that LLVM's intrinsic will compile to an optimized
+    // `f64.sqrt` native instruction, so we can leverage this for both code size
+    // and speed.
+    llvm_intrinsically_optimized! {
+        #[cfg(target_arch = "wasm32")] {
+            return if x < 0.0 {
+                f64::NAN
+            } else {
+                unsafe { ::core::intrinsics::sqrtf64(x) }
+            }
+        }
+    }
+    let mut z: f64;
+    let sign: Wrapping<u32> = Wrapping(0x80000000);
+    let mut ix0: i32;
+    let mut s0: i32;
+    let mut q: i32;
+    let mut m: i32;
+    let mut t: i32;
+    let mut i: i32;
+    let mut r: Wrapping<u32>;
+    let mut t1: Wrapping<u32>;
+    let mut s1: Wrapping<u32>;
+    let mut ix1: Wrapping<u32>;
+    let mut q1: Wrapping<u32>;
+
+    ix0 = (x.to_bits() >> 32) as i32;
+    ix1 = Wrapping(x.to_bits() as u32);
+
+    /* take care of Inf and NaN */
+    if (ix0 & 0x7ff00000) == 0x7ff00000 {
+        return x * x + x; /* sqrt(NaN)=NaN, sqrt(+inf)=+inf, sqrt(-inf)=sNaN */
+    }
+    /* take care of zero */
+    if ix0 <= 0 {
+        if ((ix0 & !(sign.0 as i32)) | ix1.0 as i32) == 0 {
+            return x; /* sqrt(+-0) = +-0 */
+        }
+        if ix0 < 0 {
+            return (x - x) / (x - x); /* sqrt(-ve) = sNaN */
+        }
+    }
+    /* normalize x */
+    m = ix0 >> 20;
+    if m == 0 {
+        /* subnormal x */
+        while ix0 == 0 {
+            m -= 21;
+            ix0 |= (ix1 >> 11).0 as i32;
+            ix1 <<= 21;
+        }
+        i = 0;
+        while (ix0 & 0x00100000) == 0 {
+            i += 1;
+            ix0 <<= 1;
+        }
+        m -= i - 1;
+        ix0 |= (ix1 >> (32 - i) as usize).0 as i32;
+        ix1 = ix1 << i as usize;
+    }
+    m -= 1023; /* unbias exponent */
+    ix0 = (ix0 & 0x000fffff) | 0x00100000;
+    if (m & 1) == 1 {
+        /* odd m, double x to make it even */
+        ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32;
+        ix1 += ix1;
+    }
+    m >>= 1; /* m = [m/2] */
+
+    /* generate sqrt(x) bit by bit */
+    ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32;
+    ix1 += ix1;
+    q = 0; /* [q,q1] = sqrt(x) */
+    q1 = Wrapping(0);
+    s0 = 0;
+    s1 = Wrapping(0);
+    r = Wrapping(0x00200000); /* r = moving bit from right to left */
+
+    while r != Wrapping(0) {
+        t = s0 + r.0 as i32;
+        if t <= ix0 {
+            s0 = t + r.0 as i32;
+            ix0 -= t;
+            q += r.0 as i32;
+        }
+        ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32;
+        ix1 += ix1;
+        r >>= 1;
+    }
+
+    r = sign;
+    while r != Wrapping(0) {
+        t1 = s1 + r;
+        t = s0;
+        if t < ix0 || (t == ix0 && t1 <= ix1) {
+            s1 = t1 + r;
+            if (t1 & sign) == sign && (s1 & sign) == Wrapping(0) {
+                s0 += 1;
+            }
+            ix0 -= t;
+            if ix1 < t1 {
+                ix0 -= 1;
+            }
+            ix1 -= t1;
+            q1 += r;
+        }
+        ix0 += ix0 + ((ix1 & sign) >> 31).0 as i32;
+        ix1 += ix1;
+        r >>= 1;
+    }
+
+    /* use floating add to find out rounding direction */
+    if (ix0 as u32 | ix1.0) != 0 {
+        z = 1.0 - TINY; /* raise inexact flag */
+        if z >= 1.0 {
+            z = 1.0 + TINY;
+            if q1.0 == 0xffffffff {
+                q1 = Wrapping(0);
+                q += 1;
+            } else if z > 1.0 {
+                if q1.0 == 0xfffffffe {
+                    q += 1;
+                }
+                q1 += Wrapping(2);
+            } else {
+                q1 += q1 & Wrapping(1);
+            }
+        }
+    }
+    ix0 = (q >> 1) + 0x3fe00000;
+    ix1 = q1 >> 1;
+    if (q & 1) == 1 {
+        ix1 |= sign;
+    }
+    ix0 += m << 20;
+    f64::from_bits((ix0 as u64) << 32 | ix1.0 as u64)
+}
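+
+// Illustrative sketch (added for exposition, not upstream code): the same
+// one-result-bit-per-iteration recurrence in its classic integer form.
+// It computes floor(sqrt(n)) by trying each root bit from high to low,
+// much as steps (2)-(5) in the comment above do for the scaled mantissa.
+#[cfg(test)]
+fn isqrt(mut n: u64) -> u64 {
+    let mut x = 0u64; // root bits accepted so far (the role of s)
+    let mut bit = 1u64 << 62; // trial bit squared; always a power of four
+    while bit > n {
+        bit >>= 2;
+    }
+    while bit != 0 {
+        if n >= x + bit {
+            n -= x + bit; // accept the bit, shrinking the residual
+            x = (x >> 1) + bit;
+        } else {
+            x >>= 1; // reject the bit; realign x with the next trial
+        }
+        bit >>= 2;
+    }
+    x
+}
+
+#[test]
+fn isqrt_agrees_with_sqrt() {
+    for &n in &[0u64, 1, 2, 15, 16, 17, 1 << 40] {
+        assert_eq!(isqrt(n), sqrt(n as f64) as u64);
+    }
+}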
+
+
\ No newline at end of file
diff --git a/src/libm/math/sqrtf.rs.html b/src/libm/math/sqrtf.rs.html
new file mode 100644
index 000000000..348a68787
--- /dev/null
+++ b/src/libm/math/sqrtf.rs.html
@@ -0,0 +1,227 @@
+sqrtf.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/e_sqrtf.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+const TINY: f32 = 1.0e-30;
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn sqrtf(x: f32) -> f32 {
+    // On wasm32 we know that LLVM's intrinsic will compile to an optimized
+    // `f32.sqrt` native instruction, so we can leverage this for both code size
+    // and speed.
+    llvm_intrinsically_optimized! {
+        #[cfg(target_arch = "wasm32")] {
+            return if x < 0.0 {
+                ::core::f32::NAN
+            } else {
+                unsafe { ::core::intrinsics::sqrtf32(x) }
+            }
+        }
+    }
+    let mut z: f32;
+    let sign: i32 = 0x80000000u32 as i32;
+    let mut ix: i32;
+    let mut s: i32;
+    let mut q: i32;
+    let mut m: i32;
+    let mut t: i32;
+    let mut i: i32;
+    let mut r: u32;
+
+    ix = x.to_bits() as i32;
+
+    /* take care of Inf and NaN */
+    if (ix as u32 & 0x7f800000) == 0x7f800000 {
+        return x * x + x; /* sqrt(NaN)=NaN, sqrt(+inf)=+inf, sqrt(-inf)=sNaN */
+    }
+
+    /* take care of zero */
+    if ix <= 0 {
+        if (ix & !sign) == 0 {
+            return x; /* sqrt(+-0) = +-0 */
+        }
+        if ix < 0 {
+            return (x - x) / (x - x); /* sqrt(-ve) = sNaN */
+        }
+    }
+
+    /* normalize x */
+    m = ix >> 23;
+    if m == 0 {
+        /* subnormal x */
+        i = 0;
+        while ix & 0x00800000 == 0 {
+            ix <<= 1;
+            i = i + 1;
+        }
+        m -= i - 1;
+    }
+    m -= 127; /* unbias exponent */
+    ix = (ix & 0x007fffff) | 0x00800000;
+    if m & 1 == 1 {
+        /* odd m, double x to make it even */
+        ix += ix;
+    }
+    m >>= 1; /* m = [m/2] */
+
+    /* generate sqrt(x) bit by bit */
+    ix += ix;
+    q = 0;
+    s = 0;
+    r = 0x01000000; /* r = moving bit from right to left */
+
+    while r != 0 {
+        t = s + r as i32;
+        if t <= ix {
+            s = t + r as i32;
+            ix -= t;
+            q += r as i32;
+        }
+        ix += ix;
+        r >>= 1;
+    }
+
+    /* use floating add to find out rounding direction */
+    if ix != 0 {
+        z = 1.0 - TINY; /* raise inexact flag */
+        if z >= 1.0 {
+            z = 1.0 + TINY;
+            if z > 1.0 {
+                q += 2;
+            } else {
+                q += q & 1;
+            }
+        }
+    }
+
+    ix = (q >> 1) + 0x3f000000;
+    ix += m << 23;
+    f32::from_bits(ix as u32)
+}
+
+
\ No newline at end of file
diff --git a/src/libm/math/tan.rs.html b/src/libm/math/tan.rs.html
new file mode 100644
index 000000000..a7a05be9a
--- /dev/null
+++ b/src/libm/math/tan.rs.html
@@ -0,0 +1,145 @@
+tan.rs.html -- source
+// origin: FreeBSD /usr/src/lib/msun/src/s_tan.c
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunPro, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+
+use super::{k_tan, rem_pio2};
+
+// tan(x)
+// Return tangent function of x.
+//
+// kernel function:
+//      k_tan           ... tangent function on [-pi/4,pi/4]
+//      rem_pio2        ... argument reduction routine
+//
+// Method.
+//      Let S,C and T denote the sin, cos and tan respectively on
+//      [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+//      in [-pi/4 , +pi/4], and let n = k mod 4.
+//      We have
+//
+//          n        sin(x)      cos(x)        tan(x)
+//     ----------------------------------------------------------
+//          0          S           C             T
+//          1          C          -S            -1/T
+//          2         -S          -C             T
+//          3         -C           S            -1/T
+//     ----------------------------------------------------------
+//
+// Special cases:
+//      Let trig be any of sin, cos, or tan.
+//      trig(+-INF)  is NaN, with signals;
+//      trig(NaN)    is that NaN;
+//
+// Accuracy:
+//      TRIG(x) returns trig(x) nearly rounded
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn tan(x: f64) -> f64 {
+    let x1p120 = f32::from_bits(0x7b800000); // 0x1p120f === 2 ^ 120
+
+    let ix = (f64::to_bits(x) >> 32) as u32 & 0x7fffffff;
+    /* |x| ~< pi/4 */
+    if ix <= 0x3fe921fb {
+        if ix < 0x3e400000 {
+            /* |x| < 2**-27 */
+            /* raise inexact if x!=0 and underflow if subnormal */
+            force_eval!(if ix < 0x00100000 {
+                x / x1p120 as f64
+            } else {
+                x + x1p120 as f64
+            });
+            return x;
+        }
+        return k_tan(x, 0.0, 0);
+    }
+
+    /* tan(Inf or NaN) is NaN */
+    if ix >= 0x7ff00000 {
+        return x - x;
+    }
+
+    /* argument reduction */
+    let (n, y0, y1) = rem_pio2(x);
+    k_tan(y0, y1, n & 1)
+}
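+
+// Editorial sketch (not part of libm): the quadrant table above says that
+// after reducing by k*pi/2, odd k maps tan(x) to -1/tan(x). A quick check of
+// that identity against std:
+#[cfg(test)]
+mod quadrant_identity {
+    #[test]
+    fn odd_quadrant_flips_to_negative_reciprocal() {
+        let x = 0.3_f64;
+        let lhs = (x + std::f64::consts::FRAC_PI_2).tan();
+        let rhs = -1.0 / x.tan();
+        assert!((lhs - rhs).abs() < 1e-9);
+    }
+}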
+
+
\ No newline at end of file diff --git a/src/libm/math/tanf.rs.html b/src/libm/math/tanf.rs.html new file mode 100644 index 000000000..9eba1280b --- /dev/null +++ b/src/libm/math/tanf.rs.html @@ -0,0 +1,161 @@ +tanf.rs.html -- source
+/* origin: FreeBSD /usr/src/lib/msun/src/s_tanf.c */
+/*
+ * Conversion to float by Ian Lance Taylor, Cygnus Support, ian@cygnus.com.
+ * Optimized by Bruce D. Evans.
+ */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+use super::{k_tanf, rem_pio2f};
+
+use core::f64::consts::FRAC_PI_2;
+
+/* Small multiples of pi/2 rounded to double precision. */
+const T1_PIO2: f64 = 1. * FRAC_PI_2; /* 0x3FF921FB, 0x54442D18 */
+const T2_PIO2: f64 = 2. * FRAC_PI_2; /* 0x400921FB, 0x54442D18 */
+const T3_PIO2: f64 = 3. * FRAC_PI_2; /* 0x4012D97C, 0x7F3321D2 */
+const T4_PIO2: f64 = 4. * FRAC_PI_2; /* 0x401921FB, 0x54442D18 */
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn tanf(x: f32) -> f32 {
+    let x64 = x as f64;
+
+    let x1p120 = f32::from_bits(0x7b800000); // 0x1p120f === 2 ^ 120
+
+    let mut ix = x.to_bits();
+    let sign = (ix >> 31) != 0;
+    ix &= 0x7fffffff;
+
+    if ix <= 0x3f490fda {
+        /* |x| ~<= pi/4 */
+        if ix < 0x39800000 {
+            /* |x| < 2**-12 */
+            /* raise inexact if x!=0 and underflow if subnormal */
+            force_eval!(if ix < 0x00800000 {
+                x / x1p120
+            } else {
+                x + x1p120
+            });
+            return x;
+        }
+        return k_tanf(x64, false);
+    }
+    if ix <= 0x407b53d1 {
+        /* |x| ~<= 5*pi/4 */
+        if ix <= 0x4016cbe3 {
+            /* |x| ~<= 3pi/4 */
+            return k_tanf(if sign { x64 + T1_PIO2 } else { x64 - T1_PIO2 }, true);
+        } else {
+            return k_tanf(if sign { x64 + T2_PIO2 } else { x64 - T2_PIO2 }, false);
+        }
+    }
+    if ix <= 0x40e231d5 {
+        /* |x| ~<= 9*pi/4 */
+        if ix <= 0x40afeddf {
+            /* |x| ~<= 7*pi/4 */
+            return k_tanf(if sign { x64 + T3_PIO2 } else { x64 - T3_PIO2 }, true);
+        } else {
+            return k_tanf(if sign { x64 + T4_PIO2 } else { x64 - T4_PIO2 }, false);
+        }
+    }
+
+    /* tan(Inf or NaN) is NaN */
+    if ix >= 0x7f800000 {
+        return x - x;
+    }
+
+    /* argument reduction */
+    let (n, y) = rem_pio2f(x);
+    k_tanf(y, n & 1 != 0)
+}
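+
+// Editorial sketch (not part of libm): the hex thresholds above are just the
+// f32 bit patterns of small multiples of pi/4, so each `ix <= ...` compare
+// classifies |x| into a pi/2-wide band whose nearest multiple of pi/2 is
+// then subtracted before calling k_tanf:
+#[cfg(test)]
+mod threshold_constants {
+    #[test]
+    fn thresholds_decode_to_multiples_of_pi_over_4() {
+        let q = std::f32::consts::FRAC_PI_4;
+        for &(bits, mult) in &[
+            (0x3f490fda_u32, 1.0_f32), // ~ pi/4
+            (0x4016cbe3, 3.0),         // ~ 3*pi/4
+            (0x407b53d1, 5.0),         // ~ 5*pi/4
+            (0x40afeddf, 7.0),         // ~ 7*pi/4
+            (0x40e231d5, 9.0),         // ~ 9*pi/4
+        ] {
+            assert!((f32::from_bits(bits) - mult * q).abs() < 1e-5);
+        }
+    }
+}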
+
+
\ No newline at end of file diff --git a/src/libm/math/tanh.rs.html b/src/libm/math/tanh.rs.html new file mode 100644 index 000000000..020b92b48 --- /dev/null +++ b/src/libm/math/tanh.rs.html @@ -0,0 +1,111 @@ +tanh.rs.html -- source
+use super::expm1;
+
+/* tanh(x) = (exp(x) - exp(-x))/(exp(x) + exp(-x))
+ *         = (exp(2*x) - 1)/(exp(2*x) - 1 + 2)
+ *         = (1 - exp(-2*x))/(exp(-2*x) - 1 + 2)
+ */
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn tanh(mut x: f64) -> f64 {
+    let mut uf: f64 = x;
+    let mut ui: u64 = f64::to_bits(uf);
+
+    let w: u32;
+    let sign: bool;
+    let mut t: f64;
+
+    /* x = |x| */
+    sign = ui >> 63 != 0;
+    ui &= !1 / 2; /* !1 / 2 == 0x7fff_ffff_ffff_ffff: keep all bits but the sign */
+    uf = f64::from_bits(ui);
+    x = uf;
+    w = (ui >> 32) as u32;
+
+    if w > 0x3fe193ea {
+        /* |x| > log(3)/2 ~= 0.5493 or nan */
+        if w > 0x40340000 {
+            /* |x| > 20 or nan */
+            /* note: this branch avoids raising overflow */
+            t = 1.0 - 0.0 / x;
+        } else {
+            t = expm1(2.0 * x);
+            t = 1.0 - 2.0 / (t + 2.0);
+        }
+    } else if w > 0x3fd058ae {
+        /* |x| > log(5/3)/2 ~= 0.2554 */
+        t = expm1(2.0 * x);
+        t = t / (t + 2.0);
+    } else if w >= 0x00100000 {
+        /* |x| >= 0x1p-1022, up to 2ulp error in [0.1,0.2554] */
+        t = expm1(-2.0 * x);
+        t = -t / (t + 2.0);
+    } else {
+        /* |x| is subnormal */
+        /* note: the branch above would not raise underflow in [0x1p-1023,0x1p-1022) */
+        force_eval!(x as f32);
+        t = x;
+    }
+
+    if sign {
+        -t
+    } else {
+        t
+    }
+}
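+
+// Editorial sketch (not part of libm): the expm1-based identity used above,
+// written against std for comparison. Computing t = expm1(2x) first avoids
+// the cancellation that (exp(x) - exp(-x))/(exp(x) + exp(-x)) would suffer
+// for small x:
+#[allow(dead_code)]
+fn tanh_via_expm1(x: f64) -> f64 {
+    let t = (2.0 * x).exp_m1(); // exp(2x) - 1, accurate near zero
+    t / (t + 2.0)               // == (exp(2x) - 1)/(exp(2x) + 1) == tanh(x)
+}
+// e.g. tanh_via_expm1(0.25) agrees with 0.25_f64.tanh() to within a few ulp.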
+
+
\ No newline at end of file diff --git a/src/libm/math/tanhf.rs.html b/src/libm/math/tanhf.rs.html new file mode 100644 index 000000000..8066b4059 --- /dev/null +++ b/src/libm/math/tanhf.rs.html @@ -0,0 +1,83 @@ +tanhf.rs.html -- source
+use super::expm1f;
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn tanhf(mut x: f32) -> f32 {
+    /* x = |x| */
+    let mut ix = x.to_bits();
+    let sign = (ix >> 31) != 0;
+    ix &= 0x7fffffff;
+    x = f32::from_bits(ix);
+    let w = ix;
+
+    let tt = if w > 0x3f0c9f54 {
+        /* |x| > log(3)/2 ~= 0.5493 or nan */
+        if w > 0x41200000 {
+            /* |x| > 10 */
+            1. + 0. / x
+        } else {
+            let t = expm1f(2. * x);
+            1. - 2. / (t + 2.)
+        }
+    } else if w > 0x3e82c578 {
+        /* |x| > log(5/3)/2 ~= 0.2554 */
+        let t = expm1f(2. * x);
+        t / (t + 2.)
+    } else if w >= 0x00800000 {
+        /* |x| >= 0x1p-126 */
+        let t = expm1f(-2. * x);
+        -t / (t + 2.)
+    } else {
+        /* |x| is subnormal */
+        force_eval!(x * x);
+        x
+    };
+    if sign {
+        -tt
+    } else {
+        tt
+    }
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/tgamma.rs.html b/src/libm/math/tgamma.rs.html new file mode 100644 index 000000000..3751b87e0 --- /dev/null +++ b/src/libm/math/tgamma.rs.html @@ -0,0 +1,417 @@ +tgamma.rs.html -- source
+/*
+"A Precision Approximation of the Gamma Function" - Cornelius Lanczos (1964)
+"Lanczos Implementation of the Gamma Function" - Paul Godfrey (2001)
+"An Analysis of the Lanczos Gamma Approximation" - Glendon Ralph Pugh (2004)
+
+approximation method:
+
+                        (x - 0.5)         S(x)
+Gamma(x) = (x + g - 0.5)         *  ----------------
+                                    exp(x + g - 0.5)
+
+with
+                 a1      a2      a3            aN
+S(x) ~= [ a0 + ----- + ----- + ----- + ... + ----- ]
+               x + 1   x + 2   x + 3         x + N
+
+with a0, a1, a2, a3,.. aN constants which depend on g.
+
+for x < 0 the following reflection formula is used:
+
+Gamma(x)*Gamma(-x) = -pi/(x sin(pi x))
+
+most ideas and constants are from boost and python
+*/
+extern crate core;
+use super::{exp, floor, k_cos, k_sin, pow};
+
+const PI: f64 = 3.141592653589793238462643383279502884;
+
+/* sin(pi x) with x > 0x1p-100, if sin(pi*x)==0 the sign is arbitrary */
+fn sinpi(mut x: f64) -> f64 {
+    let mut n: isize;
+
+    /* argument reduction: x = |x| mod 2 */
+    /* spurious inexact when x is odd int */
+    x = x * 0.5;
+    x = 2.0 * (x - floor(x));
+
+    /* reduce x into [-.25,.25] */
+    n = (4.0 * x) as isize;
+    n = (n + 1) / 2;
+    x -= (n as f64) * 0.5;
+
+    x *= PI;
+    match n {
+        1 => k_cos(x, 0.0),
+        2 => k_sin(-x, 0.0, 0),
+        3 => -k_cos(x, 0.0),
+        0 | _ => k_sin(x, 0.0, 0),
+    }
+}
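+
+// Editorial sketch (not part of libm): modulo the sign convention at exact
+// integers, sinpi(x) above should agree with the naive reference below; the
+// point of the reduction is that pi*x is never formed for huge x, where the
+// product alone would lose the low-order bits that matter:
+#[allow(dead_code)]
+fn sinpi_reference(x: f64) -> f64 {
+    (std::f64::consts::PI * x).sin()
+}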
+
+const N: usize = 12;
+//static const double g = 6.024680040776729583740234375;
+const GMHALF: f64 = 5.524680040776729583740234375;
+const SNUM: [f64; N + 1] = [
+    23531376880.410759688572007674451636754734846804940,
+    42919803642.649098768957899047001988850926355848959,
+    35711959237.355668049440185451547166705960488635843,
+    17921034426.037209699919755754458931112671403265390,
+    6039542586.3520280050642916443072979210699388420708,
+    1439720407.3117216736632230727949123939715485786772,
+    248874557.86205415651146038641322942321632125127801,
+    31426415.585400194380614231628318205362874684987640,
+    2876370.6289353724412254090516208496135991145378768,
+    186056.26539522349504029498971604569928220784236328,
+    8071.6720023658162106380029022722506138218516325024,
+    210.82427775157934587250973392071336271166969580291,
+    2.5066282746310002701649081771338373386264310793408,
+];
+const SDEN: [f64; N + 1] = [
+    0.0,
+    39916800.0,
+    120543840.0,
+    150917976.0,
+    105258076.0,
+    45995730.0,
+    13339535.0,
+    2637558.0,
+    357423.0,
+    32670.0,
+    1925.0,
+    66.0,
+    1.0,
+];
+/* n! for small integer n */
+const FACT: [f64; 23] = [
+    1.0,
+    1.0,
+    2.0,
+    6.0,
+    24.0,
+    120.0,
+    720.0,
+    5040.0,
+    40320.0,
+    362880.0,
+    3628800.0,
+    39916800.0,
+    479001600.0,
+    6227020800.0,
+    87178291200.0,
+    1307674368000.0,
+    20922789888000.0,
+    355687428096000.0,
+    6402373705728000.0,
+    121645100408832000.0,
+    2432902008176640000.0,
+    51090942171709440000.0,
+    1124000727777607680000.0,
+];
+
+/* S(x) rational function for positive x */
+fn s(x: f64) -> f64 {
+    let mut num: f64 = 0.0;
+    let mut den: f64 = 0.0;
+
+    /* to avoid overflow handle large x differently */
+    if x < 8.0 {
+        for i in (0..=N).rev() {
+            num = num * x + SNUM[i];
+            den = den * x + SDEN[i];
+        }
+    } else {
+        for i in 0..=N {
+            num = num / x + SNUM[i];
+            den = den / x + SDEN[i];
+        }
+    }
+    return num / den;
+}
+
+pub fn tgamma(mut x: f64) -> f64 {
+    let u: u64 = x.to_bits();
+    let absx: f64;
+    let mut y: f64;
+    let mut dy: f64;
+    let mut z: f64;
+    let mut r: f64;
+    let ix: u32 = ((u >> 32) as u32) & 0x7fffffff;
+    let sign: bool = (u >> 63) != 0;
+
+    /* special cases */
+    if ix >= 0x7ff00000 {
+        /* tgamma(nan)=nan, tgamma(inf)=inf, tgamma(-inf)=nan with invalid */
+        return x + core::f64::INFINITY;
+    }
+    if ix < ((0x3ff - 54) << 20) {
+        /* |x| < 2^-54: tgamma(x) ~ 1/x, +-0 raises div-by-zero */
+        return 1.0 / x;
+    }
+
+    /* integer arguments */
+    /* raise inexact when non-integer */
+    if x == floor(x) {
+        if sign {
+            return 0.0 / 0.0;
+        }
+        if x <= FACT.len() as f64 {
+            return FACT[(x as usize) - 1];
+        }
+    }
+
+    /* x >= 172: tgamma(x)=inf with overflow */
+/* x <= -184: tgamma(x)=+-0 with underflow */
+    if ix >= 0x40670000 {
+        /* |x| >= 184 */
+        if sign {
+            let x1p_126 = f64::from_bits(0x3810000000000000); // 0x1p-126 == 2^-126
+            force_eval!((x1p_126 / x) as f32);
+            if floor(x) * 0.5 == floor(x * 0.5) {
+                return 0.0;
+            } else {
+                return -0.0;
+            }
+        }
+        let x1p1023 = f64::from_bits(0x7fe0000000000000); // 0x1p1023 == 2^1023
+        x *= x1p1023;
+        return x;
+    }
+
+    absx = if sign { -x } else { x };
+
+    /* handle the error of x + g - 0.5 */
+    y = absx + GMHALF;
+    if absx > GMHALF {
+        dy = y - absx;
+        dy -= GMHALF;
+    } else {
+        dy = y - GMHALF;
+        dy -= absx;
+    }
+
+    z = absx - 0.5;
+    r = s(absx) * exp(-y);
+    if x < 0.0 {
+        /* reflection formula for negative x */
+        /* sinpi(absx) is not 0, integers are already handled */
+        r = -PI / (sinpi(absx) * absx * r);
+        dy = -dy;
+        z = -z;
+    }
+    r += dy * (GMHALF + 0.5) * r / y;
+    z = pow(y, 0.5 * z);
+    y = r * z * z;
+    return y;
+}
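+
+// Editorial sketch (not part of libm): two quick properties implied by the
+// code above, checked against the FACT table and the reflection formula:
+#[cfg(test)]
+mod tgamma_properties {
+    use super::tgamma;
+
+    #[test]
+    fn small_integers_hit_the_factorial_table() {
+        // tgamma(n) == (n-1)! for n in 1..=23 is served directly from FACT
+        assert_eq!(tgamma(1.0), 1.0);
+        assert_eq!(tgamma(5.0), 24.0);
+    }
+
+    #[test]
+    fn negative_non_integers_use_reflection() {
+        // Gamma(-0.5) == -2*sqrt(pi) by the reflection formula
+        let expected = -2.0 * std::f64::consts::PI.sqrt();
+        assert!((tgamma(-0.5) - expected).abs() < 1e-12);
+    }
+}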
+
+
\ No newline at end of file diff --git a/src/libm/math/tgammaf.rs.html b/src/libm/math/tgammaf.rs.html new file mode 100644 index 000000000..cba6c5435 --- /dev/null +++ b/src/libm/math/tgammaf.rs.html @@ -0,0 +1,13 @@ +tgammaf.rs.html -- source
+use super::tgamma;
+
+pub fn tgammaf(x: f32) -> f32 {
+    tgamma(x as f64) as f32
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/trunc.rs.html b/src/libm/math/trunc.rs.html new file mode 100644 index 000000000..cf67f2344 --- /dev/null +++ b/src/libm/math/trunc.rs.html @@ -0,0 +1,85 @@ +trunc.rs.html -- source
+use core::f64;
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn trunc(x: f64) -> f64 {
+    // On wasm32 we know that LLVM's intrinsic will compile to an optimized
+    // `f64.trunc` native instruction, so we can leverage this for both code size
+    // and speed.
+    llvm_intrinsically_optimized! {
+        #[cfg(target_arch = "wasm32")] {
+            return unsafe { ::core::intrinsics::truncf64(x) }
+        }
+    }
+    let x1p120 = f64::from_bits(0x4770000000000000); // 0x1p120f === 2 ^ 120
+
+    let mut i: u64 = x.to_bits();
+    let mut e: i64 = (i >> 52 & 0x7ff) as i64 - 0x3ff + 12;
+    let m: u64;
+
+    if e >= 52 + 12 {
+        return x;
+    }
+    if e < 12 {
+        e = 1;
+    }
+    m = -1i64 as u64 >> e;
+    if (i & m) == 0 {
+        return x;
+    }
+    force_eval!(x + x1p120);
+    i &= !m;
+    f64::from_bits(i)
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn sanity_check() {
+        assert_eq!(super::trunc(1.1), 1.0);
+    }
+}
+
+
\ No newline at end of file diff --git a/src/libm/math/truncf.rs.html b/src/libm/math/truncf.rs.html new file mode 100644 index 000000000..6e02a1225 --- /dev/null +++ b/src/libm/math/truncf.rs.html @@ -0,0 +1,85 @@ +truncf.rs.html -- source
+use core::f32;
+
+#[inline]
+#[cfg_attr(all(test, assert_no_panic), no_panic::no_panic)]
+pub fn truncf(x: f32) -> f32 {
+    // On wasm32 we know that LLVM's intrinsic will compile to an optimized
+    // `f32.trunc` native instruction, so we can leverage this for both code size
+    // and speed.
+    llvm_intrinsically_optimized! {
+        #[cfg(target_arch = "wasm32")] {
+            return unsafe { ::core::intrinsics::truncf32(x) }
+        }
+    }
+    let x1p120 = f32::from_bits(0x7b800000); // 0x1p120f === 2 ^ 120
+
+    let mut i: u32 = x.to_bits();
+    let mut e: i32 = (i >> 23 & 0xff) as i32 - 0x7f + 9;
+    let m: u32;
+
+    if e >= 23 + 9 {
+        return x;
+    }
+    if e < 9 {
+        e = 1;
+    }
+    m = -1i32 as u32 >> e;
+    if (i & m) == 0 {
+        return x;
+    }
+    force_eval!(x + x1p120);
+    i &= !m;
+    f32::from_bits(i)
+}
+
+#[cfg(test)]
+mod tests {
+    #[test]
+    fn sanity_check() {
+        assert_eq!(super::truncf(1.1), 1.0);
+    }
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/api.rs.html b/src/packed_simd/api.rs.html new file mode 100644 index 000000000..a720c89fb --- /dev/null +++ b/src/packed_simd/api.rs.html @@ -0,0 +1,621 @@ +api.rs.html -- source
+//! Implements the Simd<[T; N]> APIs
+
+#[macro_use]
+mod bitmask;
+crate mod cast;
+#[macro_use]
+mod cmp;
+#[macro_use]
+mod default;
+#[macro_use]
+mod fmt;
+#[macro_use]
+mod from;
+#[macro_use]
+mod hash;
+#[macro_use]
+mod math;
+#[macro_use]
+mod minimal;
+#[macro_use]
+mod ops;
+#[macro_use]
+mod ptr;
+#[macro_use]
+mod reductions;
+#[macro_use]
+mod select;
+#[macro_use]
+mod shuffle;
+#[macro_use]
+mod shuffle1_dyn;
+#[macro_use]
+mod slice;
+#[macro_use]
+mod swap_bytes;
+#[macro_use]
+mod bit_manip;
+
+#[cfg(feature = "into_bits")]
+crate mod into_bits;
+
+macro_rules! impl_i {
+    ([$elem_ty:ident; $elem_n:expr]: $tuple_id:ident, $mask_ty:ident
+     | $ielem_ty:ident, $ibitmask_ty:ident | $test_tt:tt | $($elem_ids:ident),*
+     | From: $($from_vec_ty:ident),* | $(#[$doc:meta])*) => {
+        impl_minimal_iuf!([$elem_ty; $elem_n]: $tuple_id | $ielem_ty | $test_tt
+                          | $($elem_ids),* | $(#[$doc])*);
+        impl_ops_vector_arithmetic!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_ops_scalar_arithmetic!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_ops_vector_bitwise!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt | (!(0 as $elem_ty), 0)
+        );
+        impl_ops_scalar_bitwise!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt | (!(0 as $elem_ty), 0)
+        );
+        impl_ops_vector_shifts!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_ops_scalar_shifts!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_ops_vector_rotates!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_ops_vector_neg!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_ops_vector_int_min_max!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt
+        );
+        impl_reduction_integer_arithmetic!(
+            [$elem_ty; $elem_n]: $tuple_id | $ielem_ty | $test_tt
+        );
+        impl_reduction_min_max!(
+            [$elem_ty; $elem_n]: $tuple_id | $ielem_ty | $test_tt
+        );
+        impl_reduction_bitwise!(
+            [$elem_ty; $elem_n]: $tuple_id | $ielem_ty | $test_tt
+            | (|x|{ x as $elem_ty }) | (!(0 as $elem_ty), 0)
+        );
+        impl_fmt_debug!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_fmt_lower_hex!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_fmt_upper_hex!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_fmt_octal!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_fmt_binary!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_from_array!([$elem_ty; $elem_n]: $tuple_id | $test_tt | (1, 1));
+        impl_from_vectors!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt | $($from_vec_ty),*
+        );
+        impl_default!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_hash!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_slice_from_slice!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_slice_write_to_slice!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_swap_bytes!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_bit_manip!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_shuffle1_dyn!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_cmp_partial_eq!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt | (0, 1)
+        );
+        impl_cmp_eq!([$elem_ty; $elem_n]: $tuple_id | $test_tt | (0, 1));
+        impl_cmp_vertical!(
+            [$elem_ty; $elem_n]: $tuple_id, $mask_ty, false, (1, 0) | $test_tt
+        );
+        impl_cmp_partial_ord!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_cmp_ord!([$elem_ty; $elem_n]: $tuple_id | $test_tt | (0, 1));
+        impl_bitmask!($tuple_id | $ibitmask_ty | (-1, 0) | $test_tt);
+
+        test_select!($elem_ty, $mask_ty, $tuple_id, (1, 2) | $test_tt);
+        test_cmp_partial_ord_int!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        test_shuffle1_dyn!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+    }
+}
+
+macro_rules! impl_u {
+    ([$elem_ty:ident; $elem_n:expr]: $tuple_id:ident, $mask_ty:ident
+     | $ielem_ty:ident, $ibitmask_ty:ident | $test_tt:tt | $($elem_ids:ident),*
+     | From: $($from_vec_ty:ident),* | $(#[$doc:meta])*) => {
+        impl_minimal_iuf!([$elem_ty; $elem_n]: $tuple_id | $ielem_ty | $test_tt
+                          | $($elem_ids),* | $(#[$doc])*);
+        impl_ops_vector_arithmetic!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_ops_scalar_arithmetic!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_ops_vector_bitwise!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt | (!(0 as $elem_ty), 0)
+        );
+        impl_ops_scalar_bitwise!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt | (!(0 as $elem_ty), 0)
+        );
+        impl_ops_vector_shifts!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_ops_scalar_shifts!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_ops_vector_rotates!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_ops_vector_int_min_max!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt
+        );
+        impl_reduction_integer_arithmetic!(
+            [$elem_ty; $elem_n]: $tuple_id | $ielem_ty | $test_tt
+        );
+        impl_reduction_min_max!(
+            [$elem_ty; $elem_n]: $tuple_id | $ielem_ty | $test_tt
+        );
+        impl_reduction_bitwise!(
+            [$elem_ty; $elem_n]: $tuple_id | $ielem_ty | $test_tt
+            | (|x|{ x as $elem_ty }) | (!(0 as $elem_ty), 0)
+        );
+        impl_fmt_debug!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_fmt_lower_hex!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_fmt_upper_hex!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_fmt_octal!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_fmt_binary!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_from_array!([$elem_ty; $elem_n]: $tuple_id | $test_tt | (1, 1));
+        impl_from_vectors!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt | $($from_vec_ty),*
+        );
+        impl_default!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_hash!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_slice_from_slice!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_slice_write_to_slice!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_swap_bytes!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_bit_manip!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_shuffle1_dyn!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_cmp_partial_eq!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt | (1, 0)
+        );
+        impl_cmp_eq!([$elem_ty; $elem_n]: $tuple_id | $test_tt | (0, 1));
+        impl_cmp_vertical!(
+            [$elem_ty; $elem_n]: $tuple_id, $mask_ty, false, (1, 0) | $test_tt
+        );
+        impl_cmp_partial_ord!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_cmp_ord!([$elem_ty; $elem_n]: $tuple_id | $test_tt | (0, 1));
+        impl_bitmask!($tuple_id | $ibitmask_ty | ($ielem_ty::max_value(), 0) |
+                      $test_tt);
+
+        test_select!($elem_ty, $mask_ty, $tuple_id, (1, 2) | $test_tt);
+        test_cmp_partial_ord_int!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        test_shuffle1_dyn!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+    }
+}
+
+macro_rules! impl_f {
+    ([$elem_ty:ident; $elem_n:expr]: $tuple_id:ident, $mask_ty:ident
+     | $ielem_ty:ident | $test_tt:tt | $($elem_ids:ident),*
+     | From: $($from_vec_ty:ident),* | $(#[$doc:meta])*) => {
+        impl_minimal_iuf!([$elem_ty; $elem_n]: $tuple_id | $ielem_ty | $test_tt
+                          | $($elem_ids),* | $(#[$doc])*);
+        impl_ops_vector_arithmetic!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_ops_scalar_arithmetic!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_ops_vector_neg!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_ops_vector_float_min_max!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt
+        );
+        impl_reduction_float_arithmetic!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_reduction_min_max!(
+            [$elem_ty; $elem_n]: $tuple_id | $ielem_ty | $test_tt
+        );
+        impl_fmt_debug!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_from_array!([$elem_ty; $elem_n]: $tuple_id | $test_tt | (1., 1.));
+        impl_from_vectors!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt | $($from_vec_ty),*
+        );
+        impl_default!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_cmp_partial_eq!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt | (1., 0.)
+        );
+        impl_slice_from_slice!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_slice_write_to_slice!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_shuffle1_dyn!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+
+        impl_float_consts!([$elem_ty; $elem_n]: $tuple_id);
+        impl_float_category!([$elem_ty; $elem_n]: $tuple_id, $mask_ty);
+
+        // floating-point math
+        impl_math_float_abs!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_math_float_cos!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_math_float_exp!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_math_float_ln!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_math_float_mul_add!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_math_float_mul_adde!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_math_float_powf!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_math_float_recpre!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_math_float_rsqrte!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_math_float_sin!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_math_float_sqrt!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_math_float_sqrte!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_math_float_tanh!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_cmp_vertical!(
+            [$elem_ty; $elem_n]: $tuple_id, $mask_ty, false, (1., 0.)
+                | $test_tt
+        );
+
+        test_select!($elem_ty, $mask_ty, $tuple_id, (1., 2.) | $test_tt);
+        test_reduction_float_min_max!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt
+        );
+        test_shuffle1_dyn!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+    }
+}
+
+macro_rules! impl_m {
+    ([$elem_ty:ident; $elem_n:expr]: $tuple_id:ident
+     | $ielem_ty:ident, $ibitmask_ty:ident
+     | $test_tt:tt | $($elem_ids:ident),* | From: $($from_vec_ty:ident),*
+     | $(#[$doc:meta])*) => {
+        impl_minimal_mask!(
+            [$elem_ty; $elem_n]: $tuple_id | $ielem_ty | $test_tt
+            | $($elem_ids),* | $(#[$doc])*
+        );
+        impl_ops_vector_mask_bitwise!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt | (true, false)
+        );
+        impl_ops_scalar_mask_bitwise!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt | (true, false)
+        );
+        impl_reduction_bitwise!(
+            [bool; $elem_n]: $tuple_id | $ielem_ty | $test_tt
+                | (|x|{ x != 0 }) | (true, false)
+        );
+        impl_reduction_mask!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_fmt_debug!([bool; $elem_n]: $tuple_id | $test_tt);
+        impl_from_array!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt
+            | (crate::$elem_ty::new(true), true)
+        );
+        impl_from_vectors!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt | $($from_vec_ty),*
+        );
+        impl_default!([bool; $elem_n]: $tuple_id | $test_tt);
+        impl_cmp_partial_eq!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt | (true, false)
+        );
+        impl_cmp_eq!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt | (true, false)
+        );
+        impl_cmp_vertical!(
+            [$elem_ty; $elem_n]: $tuple_id, $tuple_id, true, (true, false)
+            | $test_tt
+        );
+        impl_select!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_cmp_partial_ord!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_cmp_ord!(
+            [$elem_ty; $elem_n]: $tuple_id | $test_tt | (false, true)
+        );
+        impl_shuffle1_dyn!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        impl_bitmask!($tuple_id | $ibitmask_ty | (true, false) | $test_tt);
+
+        test_cmp_partial_ord_mask!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+        test_shuffle1_dyn_mask!([$elem_ty; $elem_n]: $tuple_id | $test_tt);
+    }
+}
+
+macro_rules! impl_const_p {
+    ([$elem_ty:ty; $elem_n:expr]: $tuple_id:ident, $mask_ty:ident,
+     $usize_ty:ident, $isize_ty:ident
+     | $test_tt:tt | $($elem_ids:ident),*
+     | From: $($from_vec_ty:ident),* | $(#[$doc:meta])*) => {
+        impl_minimal_p!(
+            [$elem_ty; $elem_n]: $tuple_id, $mask_ty, $usize_ty, $isize_ty
+                | ref_ | $test_tt | $($elem_ids),*
+                | (1 as $elem_ty, 0 as $elem_ty) | $(#[$doc])*
+        );
+        impl_ptr_read!([$elem_ty; $elem_n]: $tuple_id, $mask_ty | $test_tt);
+    }
+}
+
+macro_rules! impl_mut_p {
+    ([$elem_ty:ty; $elem_n:expr]: $tuple_id:ident, $mask_ty:ident,
+     $usize_ty:ident, $isize_ty:ident
+     | $test_tt:tt | $($elem_ids:ident),*
+     | From: $($from_vec_ty:ident),* | $(#[$doc:meta])*) => {
+        impl_minimal_p!(
+            [$elem_ty; $elem_n]: $tuple_id, $mask_ty, $usize_ty, $isize_ty
+                | ref_mut_ | $test_tt | $($elem_ids),*
+                | (1 as $elem_ty, 0 as $elem_ty) | $(#[$doc])*
+        );
+        impl_ptr_read!([$elem_ty; $elem_n]: $tuple_id, $mask_ty | $test_tt);
+        impl_ptr_write!([$elem_ty; $elem_n]: $tuple_id, $mask_ty | $test_tt);
+    }
+}
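+
+// Editorial sketch (not part of packed_simd): each impl_* macro above stamps
+// out a full trait surface for one vector type. For an integer vector the
+// net effect at a call site is roughly:
+//
+//     use packed_simd::i32x4;
+//     let v = i32x4::new(1, 2, 3, 4);
+//     assert_eq!(v + v, i32x4::new(2, 4, 6, 8)); // impl_ops_vector_arithmetic
+//     assert_eq!(v.wrapping_sum(), 10);          // impl_reduction_integer_arithmetic
+//     assert_eq!(v.max_element(), 4);            // impl_reduction_min_max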
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/bit_manip.rs.html b/src/packed_simd/api/bit_manip.rs.html new file mode 100644 index 000000000..efc042592 --- /dev/null +++ b/src/packed_simd/api/bit_manip.rs.html @@ -0,0 +1,259 @@ +bit_manip.rs.html -- source
+//! Bit manipulations.
+
+macro_rules! impl_bit_manip {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Returns the number of ones in the binary representation of
+            /// the lanes of `self`.
+            #[inline]
+            pub fn count_ones(self) -> Self {
+                super::codegen::bit_manip::BitManip::ctpop(self)
+            }
+
+            /// Returns the number of zeros in the binary representation of
+            /// the lanes of `self`.
+            #[inline]
+            pub fn count_zeros(self) -> Self {
+                super::codegen::bit_manip::BitManip::ctpop(!self)
+            }
+
+            /// Returns the number of leading zeros in the binary
+            /// representation of the lanes of `self`.
+            #[inline]
+            pub fn leading_zeros(self) -> Self {
+                super::codegen::bit_manip::BitManip::ctlz(self)
+            }
+
+            /// Returns the number of trailing zeros in the binary
+            /// representation of the lanes of `self`.
+            #[inline]
+            pub fn trailing_zeros(self) -> Self {
+                super::codegen::bit_manip::BitManip::cttz(self)
+            }
+        }
+
+        test_if! {
+            $test_tt:
+            paste::item_with_macros! {
+                #[allow(overflowing_literals)]
+                pub mod [<$id _bit_manip>] {
+                    use super::*;
+
+                    const LANE_WIDTH: usize = mem::size_of::<$elem_ty>() * 8;
+
+                    macro_rules! test_func {
+                        ($x:expr, $func:ident) => {{
+                            let mut actual = $x;
+                            for i in 0..$id::lanes() {
+                                actual = actual.replace(
+                                    i,
+                                    $x.extract(i).$func() as $elem_ty
+                                );
+                            }
+                            let expected = $x.$func();
+                            assert_eq!(actual, expected);
+                        }};
+                    }
+
+                    const BYTES: [u8; 64] = [
+                        0, 1, 2, 3, 4, 5, 6, 7,
+                        8, 9, 10, 11, 12, 13, 14, 15,
+                        16, 17, 18, 19, 20, 21, 22, 23,
+                        24, 25, 26, 27, 28, 29, 30, 31,
+                        32, 33, 34, 35, 36, 37, 38, 39,
+                        40, 41, 42, 43, 44, 45, 46, 47,
+                        48, 49, 50, 51, 52, 53, 54, 55,
+                        56, 57, 58, 59, 60, 61, 62, 63,
+                    ];
+
+                    fn load_bytes() -> $id {
+                        let elems: &mut [$elem_ty] = unsafe {
+                            slice::from_raw_parts_mut(
+                                BYTES.as_mut_ptr() as *mut $elem_ty,
+                                $id::lanes(),
+                            )
+                        };
+                        $id::from_slice_unaligned(elems)
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn count_ones() {
+                        test_func!($id::splat(0), count_ones);
+                        test_func!($id::splat(!0), count_ones);
+                        test_func!(load_bytes(), count_ones);
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn count_zeros() {
+                        test_func!($id::splat(0), count_zeros);
+                        test_func!($id::splat(!0), count_zeros);
+                        test_func!(load_bytes(), count_zeros);
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn leading_zeros() {
+                        test_func!($id::splat(0), leading_zeros);
+                        test_func!($id::splat(1), leading_zeros);
+                        // some implementations use `pshufb` which has unique
+                        // behavior when the 8th bit is set.
+                        test_func!($id::splat(0b1000_0010), leading_zeros);
+                        test_func!($id::splat(!0), leading_zeros);
+                        test_func!(
+                            $id::splat(1 << (LANE_WIDTH - 1)),
+                            leading_zeros
+                        );
+                        test_func!(load_bytes(), leading_zeros);
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn trailing_zeros() {
+                        test_func!($id::splat(0), trailing_zeros);
+                        test_func!($id::splat(1), trailing_zeros);
+                        test_func!($id::splat(0b1000_0010), trailing_zeros);
+                        test_func!($id::splat(!0), trailing_zeros);
+                        test_func!(
+                            $id::splat(1 << (LANE_WIDTH - 1)),
+                            trailing_zeros
+                        );
+                        test_func!(load_bytes(), trailing_zeros);
+                    }
+                }
+            }
+        }
+    };
+}
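+
+// Editorial sketch (not part of packed_simd): what the generated API looks
+// like at a call site. Each method works lane-wise and returns a vector of
+// per-lane counts:
+//
+//     use packed_simd::u8x8;
+//     let v = u8x8::new(0, 1, 2, 3, 4, 5, 6, 7);
+//     assert_eq!(v.count_ones(), u8x8::new(0, 1, 1, 2, 1, 2, 2, 3));
+//     assert_eq!(v.leading_zeros(), u8x8::new(8, 7, 6, 6, 5, 5, 5, 5));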
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/bitmask.rs.html b/src/packed_simd/api/bitmask.rs.html new file mode 100644 index 000000000..63bcdc7a5 --- /dev/null +++ b/src/packed_simd/api/bitmask.rs.html @@ -0,0 +1,167 @@ +bitmask.rs.html -- source
+//! Bitmask API
+
+macro_rules! impl_bitmask {
+    ($id:ident | $ibitmask_ty:ident | ($set:expr, $clear:expr)
+     | $test_tt:tt) => {
+        impl $id {
+            /// Creates a bitmask with the MSB of each vector lane.
+            ///
+            /// If the vector has fewer than 8 lanes, the bits that do not
+            /// correspond to any vector lanes are cleared.
+            #[inline]
+            pub fn bitmask(self) -> $ibitmask_ty {
+                unsafe { codegen::llvm::simd_bitmask(self.0) }
+            }
+        }
+
+        test_if! {
+            $test_tt:
+            paste::item! {
+                #[cfg(not(any(
+                    // FIXME: https://github.com/rust-lang-nursery/packed_simd/issues/210
+                    all(target_arch = "mips", target_endian = "big"),
+                    all(target_arch = "mips64", target_endian = "big"),
+                    target_arch = "sparc64",
+                    target_arch = "s390x",
+                )))]
+                pub mod [<$id _bitmask>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn bitmask() {
+                        // clear all lanes
+                        let vec = $id::splat($clear as _);
+                        let bitmask: $ibitmask_ty = 0;
+                        assert_eq!(vec.bitmask(), bitmask);
+
+                        // set even lanes
+                        let mut vec = $id::splat($clear as _);
+                        for i in 0..$id::lanes() {
+                            if i % 2 == 0 {
+                                vec = vec.replace(i, $set as _);
+                            }
+                        }
+                        // create bitmask with even lanes set:
+                        let mut bitmask: $ibitmask_ty = 0;
+                        for i in 0..$id::lanes() {
+                            if i % 2 == 0 {
+                                bitmask |= 1 << i;
+                            }
+                        }
+                        assert_eq!(vec.bitmask(), bitmask);
+
+
+                        // set odd lanes
+                        let mut vec = $id::splat($clear as _);
+                        for i in 0..$id::lanes() {
+                            if i % 2 != 0 {
+                                vec = vec.replace(i, $set as _);
+                            }
+                        }
+                        // create bitmask with odd lanes set:
+                        let mut bitmask: $ibitmask_ty = 0;
+                        for i in 0..$id::lanes() {
+                            if i % 2 != 0 {
+                                bitmask |= 1 << i;
+                            }
+                        }
+                        assert_eq!(vec.bitmask(), bitmask);
+
+                        // set all lanes
+                        let vec = $id::splat($set as _);
+                        let mut bitmask: $ibitmask_ty = 0;
+                        for i in 0..$id::lanes() {
+                            bitmask |= 1 << i;
+                        }
+                        assert_eq!(vec.bitmask(), bitmask);
+                    }
+                }
+            }
+        }
+    };
+}
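+
+// Editorial sketch (not part of packed_simd): at a call site, the bitmask
+// packs lane i into bit i of the returned integer, e.g. for an 8-lane mask:
+//
+//     use packed_simd::m8x8;
+//     let m = m8x8::new(true, false, true, false, false, false, false, false);
+//     assert_eq!(m.bitmask(), 0b0000_0101_u8); // lanes 0 and 2 set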
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/cast.rs.html b/src/packed_simd/api/cast.rs.html new file mode 100644 index 000000000..00ef828a7 --- /dev/null +++ b/src/packed_simd/api/cast.rs.html @@ -0,0 +1,219 @@ +cast.rs.html -- source
+//! Implementation of `FromCast` and `IntoCast`.
+#![allow(clippy::module_name_repetitions)]
+
+/// Numeric cast from `T` to `Self`.
+///
+/// > Note: This is a temporary workaround until the conversion traits
+/// > specified in [RFC2484] are implemented.
+///
+/// Numeric cast between vectors with the same number of lanes, such that:
+///
+/// * casting integer vectors whose lane types have the same size (e.g. `i32xN`
+/// -> `u32xN`) is a **no-op**,
+///
+/// * casting from a larger integer to a smaller integer (e.g. `u32xN` ->
+/// `u8xN`) will **truncate**,
+///
+/// * casting from a smaller integer to a larger integer   (e.g. `u8xN` ->
+///   `u32xN`) will:
+///    * **zero-extend** if the source is unsigned, or
+///    * **sign-extend** if the source is signed,
+///
+/// * casting from a float to an integer will **round the float towards zero**,
+///
+/// * casting from an integer to float will produce the floating point
+/// representation of the integer, **rounding to nearest, ties to even**,
+///
+/// * casting from an `f32` to an `f64` is perfect and lossless,
+///
+/// * casting from an `f64` to an `f32` **rounds to nearest, ties to even**.
+///
+/// [RFC2484]: https://github.com/rust-lang/rfcs/pull/2484
+pub trait FromCast<T>: crate::marker::Sized {
+    /// Numeric cast from `T` to `Self`.
+    fn from_cast(_: T) -> Self;
+}
+
+/// Numeric cast from `Self` to `T`.
+///
+/// > Note: This is a temporary workaround until the conversion traits
+/// > specified in [RFC2484] are implemented.
+///
+/// Numeric cast between vectors with the same number of lanes, such that:
+///
+/// * casting integer vectors whose lane types have the same size (e.g. `i32xN`
+/// -> `u32xN`) is a **no-op**,
+///
+/// * casting from a larger integer to a smaller integer (e.g. `u32xN` ->
+/// `u8xN`) will **truncate**,
+///
+/// * casting from a smaller integer to a larger integer   (e.g. `u8xN` ->
+///   `u32xN`) will:
+///    * **zero-extend** if the source is unsigned, or
+///    * **sign-extend** if the source is signed,
+///
+/// * casting from a float to an integer will **round the float towards zero**,
+///
+/// * casting from an integer to float will produce the floating point
+/// representation of the integer, **rounding to nearest, ties to even**,
+///
+/// * casting from an `f32` to an `f64` is perfect and lossless,
+///
+/// * casting from an `f64` to an `f32` **rounds to nearest, ties to even**.
+///
+/// [RFC2484]: https://github.com/rust-lang/rfcs/pull/2484
+pub trait Cast<T>: crate::marker::Sized {
+    /// Numeric cast from `self` to `T`.
+    fn cast(self) -> T;
+}
+
+/// `FromCast` implies `Cast`.
+impl<T, U> Cast<U> for T
+where
+    U: FromCast<T>,
+{
+    #[inline]
+    fn cast(self) -> U {
+        U::from_cast(self)
+    }
+}
+
+/// `FromCast` and `Cast` are reflexive
+impl<T> FromCast<T> for T {
+    #[inline]
+    fn from_cast(t: Self) -> Self {
+        t
+    }
+}
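+
+// Editorial sketch (not part of packed_simd): the cast semantics documented
+// above, exercised at a call site:
+//
+//     use packed_simd::{f32x4, i32x4, u8x4, FromCast};
+//     let v = i32x4::new(257, -1, 0, 65);
+//     // larger -> smaller integer truncates:
+//     assert_eq!(u8x4::from_cast(v), u8x4::new(1, 255, 0, 65));
+//     // float -> integer rounds towards zero:
+//     let f = f32x4::new(1.9, -1.9, 0.5, -0.5);
+//     assert_eq!(i32x4::from_cast(f), i32x4::new(1, -1, 0, 0));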
+
+#[macro_use]
+mod macros;
+
+mod v16;
+pub use self::v16::*;
+
+mod v32;
+pub use self::v32::*;
+
+mod v64;
+pub use self::v64::*;
+
+mod v128;
+pub use self::v128::*;
+
+mod v256;
+pub use self::v256::*;
+
+mod v512;
+pub use self::v512::*;
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/cast/macros.rs.html b/src/packed_simd/api/cast/macros.rs.html new file mode 100644 index 000000000..99ce4d8f6 --- /dev/null +++ b/src/packed_simd/api/cast/macros.rs.html @@ -0,0 +1,167 @@ +macros.rs.html -- source
+//! Macros implementing `FromCast`
+
+macro_rules! impl_from_cast_ {
+    ($id:ident[$test_tt:tt]: $from_ty:ident) => {
+        impl crate::api::cast::FromCast<$from_ty> for $id {
+            #[inline]
+            fn from_cast(x: $from_ty) -> Self {
+                use crate::llvm::simd_cast;
+                debug_assert_eq!($from_ty::lanes(), $id::lanes());
+                Simd(unsafe { simd_cast(x.0) })
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _from_cast_ $from_ty>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn test() {
+                        assert_eq!($id::lanes(), $from_ty::lanes());
+                    }
+                }
+            }
+        }
+    };
+}
+
+macro_rules! impl_from_cast {
+    ($id:ident[$test_tt:tt]: $($from_ty:ident),*) => {
+        $(
+            impl_from_cast_!($id[$test_tt]: $from_ty);
+        )*
+    }
+}
+
+macro_rules! impl_from_cast_mask_ {
+    ($id:ident[$test_tt:tt]: $from_ty:ident) => {
+        impl crate::api::cast::FromCast<$from_ty> for $id {
+            #[inline]
+            fn from_cast(x: $from_ty) -> Self {
+                debug_assert_eq!($from_ty::lanes(), $id::lanes());
+                x.ne($from_ty::default())
+                    .select($id::splat(true), $id::splat(false))
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _from_cast_ $from_ty>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn test() {
+                        assert_eq!($id::lanes(), $from_ty::lanes());
+
+                        let x = $from_ty::default();
+                        let m: $id = x.cast();
+                        assert!(m.none());
+                    }
+                }
+            }
+        }
+    };
+}
+
+macro_rules! impl_from_cast_mask {
+    ($id:ident[$test_tt:tt]: $($from_ty:ident),*) => {
+        $(
+            impl_from_cast_mask_!($id[$test_tt]: $from_ty);
+        )*
+    }
+}
+
+#[allow(unused)]
+macro_rules! impl_into_cast {
+    ($id:ident[$test_tt:tt]: $($from_ty:ident),*) => {
+        $(
+            impl_from_cast_!($from_ty[$test_tt]: $id);
+        )*
+    }
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/cast/v128.rs.html b/src/packed_simd/api/cast/v128.rs.html new file mode 100644 index 000000000..c07d7a179 --- /dev/null +++ b/src/packed_simd/api/cast/v128.rs.html @@ -0,0 +1,161 @@ +v128.rs.html -- source
+//! `FromCast` and `IntoCast` implementations for portable 128-bit wide vectors
+#![rustfmt::skip]
+
+use crate::*;
+
+impl_from_cast!(
+    i8x16[test_v128]: u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16
+);
+impl_from_cast!(
+    u8x16[test_v128]: i8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16
+);
+impl_from_cast_mask!(
+    m8x16[test_v128]: i8x16, u8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16, m32x16
+);
+
+impl_from_cast!(
+    i16x8[test_v128]: i8x8, u8x8, m8x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
+    i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+);
+impl_from_cast!(
+    u16x8[test_v128]: i8x8, u8x8, m8x8, i16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
+    i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+);
+impl_from_cast_mask!(
+    m16x8[test_v128]: i8x8, u8x8, m8x8, i16x8, u16x8, i32x8, u32x8, f32x8, m32x8,
+    i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+);
+
+impl_from_cast!(
+    i32x4[test_v128]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, u32x4, f32x4, m32x4,
+    i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+);
+impl_from_cast!(
+    u32x4[test_v128]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, f32x4, m32x4,
+    i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+);
+impl_from_cast!(
+    f32x4[test_v128]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, m32x4,
+    i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+);
+impl_from_cast_mask!(
+    m32x4[test_v128]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4,
+    i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+);
+
+impl_from_cast!(
+    i64x2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
+    u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+);
+impl_from_cast!(
+    u64x2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
+    i64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+);
+impl_from_cast!(
+    f64x2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
+    i64x2, u64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+);
+impl_from_cast_mask!(
+    m64x2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
+    i64x2, u64x2, f64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+);
+
+impl_from_cast!(
+    isizex2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
+    i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, usizex2, msizex2
+);
+impl_from_cast!(
+    usizex2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
+    i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, msizex2
+);
+impl_from_cast_mask!(
+    msizex2[test_v128]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
+    i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2
+);
+
+// FIXME[test_v128]: 64-bit single element vectors into_cast impls
+impl_from_cast!(i128x1[test_v128]: u128x1, m128x1);
+impl_from_cast!(u128x1[test_v128]: i128x1, m128x1);
+impl_from_cast!(m128x1[test_v128]: i128x1, u128x1);
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/cast/v16.rs.html b/src/packed_simd/api/cast/v16.rs.html new file mode 100644 index 000000000..1a4970055 --- /dev/null +++ b/src/packed_simd/api/cast/v16.rs.html @@ -0,0 +1,37 @@ +v16.rs.html -- source
+//! `FromCast` and `IntoCast` implementations for portable 16-bit wide vectors
+#![rustfmt::skip]
+
+use crate::*;
+
+impl_from_cast!(
+    i8x2[test_v16]: u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
+    i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+);
+impl_from_cast!(
+    u8x2[test_v16]: i8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
+    i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+);
+impl_from_cast_mask!(
+    m8x2[test_v16]: i8x2, u8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
+    i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+);
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/cast/v256.rs.html b/src/packed_simd/api/cast/v256.rs.html new file mode 100644 index 000000000..548bbb82d --- /dev/null +++ b/src/packed_simd/api/cast/v256.rs.html @@ -0,0 +1,165 @@ +v256.rs.html -- source
+//! `FromCast` and `IntoCast` implementations for portable 256-bit wide vectors
+#![rustfmt::skip]
+
+use crate::*;
+
+impl_from_cast!(i8x32[test_v256]: u8x32, m8x32, i16x32, u16x32, m16x32);
+impl_from_cast!(u8x32[test_v256]: i8x32, m8x32, i16x32, u16x32, m16x32);
+impl_from_cast_mask!(m8x32[test_v256]: i8x32, u8x32, i16x32, u16x32, m16x32);
+
+impl_from_cast!(
+    i16x16[test_v256]: i8x16, u8x16, m8x16, u16x16, m16x16,
+    i32x16, u32x16, f32x16, m32x16
+);
+impl_from_cast!(
+    u16x16[test_v256]: i8x16, u8x16, m8x16, i16x16, m16x16,
+    i32x16, u32x16, f32x16, m32x16
+);
+impl_from_cast_mask!(
+    m16x16[test_v256]: i8x16, u8x16, m8x16, i16x16, u16x16,
+    i32x16, u32x16, f32x16, m32x16
+);
+
+impl_from_cast!(
+    i32x8[test_v256]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, u32x8, f32x8, m32x8,
+    i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+);
+impl_from_cast!(
+    u32x8[test_v256]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, f32x8, m32x8,
+    i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+);
+impl_from_cast!(
+    f32x8[test_v256]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, m32x8,
+    i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+);
+impl_from_cast_mask!(
+    m32x8[test_v256]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8,
+    i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+);
+
+impl_from_cast!(
+    i64x4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
+    u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+);
+impl_from_cast!(
+    u64x4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
+    i64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+);
+impl_from_cast!(
+    f64x4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
+    i64x4, u64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+);
+impl_from_cast_mask!(
+    m64x4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
+    i64x4, u64x4, f64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+);
+
+impl_from_cast!(
+    i128x2[test_v256]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
+    i64x2, u64x2, f64x2, m64x2, u128x2, m128x2, isizex2, usizex2, msizex2
+);
+impl_from_cast!(
+    u128x2[test_v256]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
+    i64x2, u64x2, f64x2, m64x2, i128x2, m128x2, isizex2, usizex2, msizex2
+);
+impl_from_cast_mask!(
+    m128x2[test_v256]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
+    i64x2, u64x2, m64x2, f64x2, i128x2, u128x2, isizex2, usizex2, msizex2
+);
+
+impl_from_cast!(
+    isizex4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
+    i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, usizex4, msizex4
+);
+impl_from_cast!(
+    usizex4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
+    i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, msizex4
+);
+impl_from_cast_mask!(
+    msizex4[test_v256]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
+    i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4
+);
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/cast/v32.rs.html b/src/packed_simd/api/cast/v32.rs.html
new file mode 100644
index 000000000..d637518a6
--- /dev/null
+++ b/src/packed_simd/api/cast/v32.rs.html
@@ -0,0 +1,63 @@
+v32.rs.html -- source
+//! `FromCast` and `IntoCast` implementations for portable 32-bit wide vectors
+#![rustfmt::skip]
+
+use crate::*;
+
+impl_from_cast!(
+    i8x4[test_v32]: u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
+    i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+);
+impl_from_cast!(
+    u8x4[test_v32]: i8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
+    i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+);
+impl_from_cast_mask!(
+    m8x4[test_v32]: i8x4, u8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
+    i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+);
+
+impl_from_cast!(
+    i16x2[test_v32]: i8x2, u8x2, m8x2, u16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
+    i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+);
+impl_from_cast!(
+    u16x2[test_v32]: i8x2, u8x2, m8x2, i16x2, m16x2, i32x2, u32x2, f32x2, m32x2,
+    i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+);
+impl_from_cast_mask!(
+    m16x2[test_v32]: i8x2, u8x2, m8x2, i16x2, u16x2, i32x2, u32x2, f32x2, m32x2,
+    i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+);
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/cast/v512.rs.html b/src/packed_simd/api/cast/v512.rs.html
new file mode 100644
index 000000000..85aef4d02
--- /dev/null
+++ b/src/packed_simd/api/cast/v512.rs.html
@@ -0,0 +1,139 @@
+v512.rs.html -- source
+//! `FromCast` and `IntoCast` implementations for portable 512-bit wide vectors
+#![rustfmt::skip]
+
+use crate::*;
+
+impl_from_cast!(i8x64[test_v512]: u8x64, m8x64);
+impl_from_cast!(u8x64[test_v512]: i8x64, m8x64);
+impl_from_cast_mask!(m8x64[test_v512]: i8x64, u8x64);
+
+impl_from_cast!(i16x32[test_v512]: i8x32, u8x32, m8x32, u16x32, m16x32);
+impl_from_cast!(u16x32[test_v512]: i8x32, u8x32, m8x32, i16x32, m16x32);
+impl_from_cast_mask!(m16x32[test_v512]: i8x32, u8x32, m8x32, i16x32, u16x32);
+
+impl_from_cast!(
+    i32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, u32x16, f32x16, m32x16
+);
+impl_from_cast!(
+    u32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, f32x16, m32x16
+);
+impl_from_cast!(
+    f32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, m32x16
+);
+impl_from_cast_mask!(
+    m32x16[test_v512]: i8x16, u8x16, m8x16, i16x16, u16x16, m16x16, i32x16, u32x16, f32x16
+);
+
+impl_from_cast!(
+    i64x8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
+    u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+);
+impl_from_cast!(
+    u64x8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
+    i64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+);
+impl_from_cast!(
+    f64x8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
+    i64x8, u64x8, m64x8, isizex8, usizex8, msizex8
+);
+impl_from_cast_mask!(
+    m64x8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
+    i64x8, u64x8, f64x8, isizex8, usizex8, msizex8
+);
+
+impl_from_cast!(
+    i128x4[test_v512]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
+    i64x4, u64x4, f64x4, m64x4, u128x4, m128x4, isizex4, usizex4, msizex4
+);
+impl_from_cast!(
+    u128x4[test_v512]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
+    i64x4, u64x4, f64x4, m64x4, i128x4, m128x4, isizex4, usizex4, msizex4
+);
+impl_from_cast_mask!(
+    m128x4[test_v512]: i8x4, u8x4, m8x4, i16x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
+    i64x4, u64x4, m64x4, f64x4, i128x4, u128x4, isizex4, usizex4, msizex4
+);
+
+impl_from_cast!(
+    isizex8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
+    i64x8, u64x8, f64x8, m64x8, usizex8, msizex8
+);
+impl_from_cast!(
+    usizex8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
+    i64x8, u64x8, f64x8, m64x8, isizex8, msizex8
+);
+impl_from_cast_mask!(
+    msizex8[test_v512]: i8x8, u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
+    i64x8, u64x8, f64x8, m64x8, isizex8, usizex8
+);
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/cast/v64.rs.html b/src/packed_simd/api/cast/v64.rs.html
new file mode 100644
index 000000000..2a4ce9375
--- /dev/null
+++ b/src/packed_simd/api/cast/v64.rs.html
@@ -0,0 +1,97 @@
+v64.rs.html -- source
+//! `FromCast` and `IntoCast` implementations for portable 64-bit wide vectors
+#![rustfmt::skip]
+
+use crate::*;
+
+impl_from_cast!(
+    i8x8[test_v64]: u8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
+    i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+);
+impl_from_cast!(
+    u8x8[test_v64]: i8x8, m8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
+    i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+);
+impl_from_cast_mask!(
+    m8x8[test_v64]: i8x8, u8x8, i16x8, u16x8, m16x8, i32x8, u32x8, f32x8, m32x8,
+    i64x8, u64x8, f64x8, m64x8, isizex8, usizex8, msizex8
+);
+
+impl_from_cast!(
+    i16x4[test_v64]: i8x4, u8x4, m8x4, u16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
+    i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+);
+impl_from_cast!(
+    u16x4[test_v64]: i8x4, u8x4, m8x4, i16x4, m16x4, i32x4, u32x4, f32x4, m32x4,
+    i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+);
+impl_from_cast_mask!(
+    m16x4[test_v64]: i8x4, u8x4, m8x4, i16x4, u16x4, i32x4, u32x4, f32x4, m32x4,
+    i64x4, u64x4, f64x4, m64x4, i128x4, u128x4, m128x4, isizex4, usizex4, msizex4
+);
+
+impl_from_cast!(
+    i32x2[test_v64]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, u32x2, f32x2, m32x2,
+    i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+);
+impl_from_cast!(
+    u32x2[test_v64]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, f32x2, m32x2,
+    i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+);
+impl_from_cast!(
+    f32x2[test_v64]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, m32x2,
+    i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+);
+impl_from_cast_mask!(
+    m32x2[test_v64]: i8x2, u8x2, m8x2, i16x2, u16x2, m16x2, i32x2, u32x2, f32x2,
+    i64x2, u64x2, f64x2, m64x2, i128x2, u128x2, m128x2, isizex2, usizex2, msizex2
+);
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/cmp.rs.html b/src/packed_simd/api/cmp.rs.html
new file mode 100644
index 000000000..58bfafca1
--- /dev/null
+++ b/src/packed_simd/api/cmp.rs.html
@@ -0,0 +1,35 @@
+cmp.rs.html -- source
+//! Implement cmp traits for vector types
+
+#[macro_use]
+mod partial_eq;
+
+#[macro_use]
+mod eq;
+
+#[macro_use]
+mod partial_ord;
+
+#[macro_use]
+mod ord;
+
+#[macro_use]
+mod vertical;
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/cmp/eq.rs.html b/src/packed_simd/api/cmp/eq.rs.html
new file mode 100644
index 000000000..794892312
--- /dev/null
+++ b/src/packed_simd/api/cmp/eq.rs.html
@@ -0,0 +1,57 @@
+eq.rs.html -- source
+//! Implements `Eq` for vector types.
+
+macro_rules! impl_cmp_eq {
+    (
+        [$elem_ty:ident; $elem_count:expr]:
+        $id:ident | $test_tt:tt |
+        ($true:expr, $false:expr)
+    ) => {
+        impl crate::cmp::Eq for $id {}
+        impl crate::cmp::Eq for LexicographicallyOrdered<$id> {}
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _cmp_eq>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn eq() {
+                        fn foo<E: crate::cmp::Eq>(_: E) {}
+                        let a = $id::splat($false);
+                        foo(a);
+                    }
+                }
+            }
+        }
+    };
+}
+
+
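`Eq` here is the usual marker trait on top of `PartialEq`; its practical effect is that integer and mask vectors can be used wherever a total equivalence is required, for example as `HashSet`/`HashMap` keys (together with the `Hash` impl that appears later in this diff). A minimal sketch, assuming the published `packed_simd` crate:

```rust
use packed_simd::u32x4;
use std::collections::HashSet;

fn main() {
    // `HashSet` keys must be `Eq + Hash`; both are implemented for
    // integer vector types by the macros in this crate.
    let mut seen = HashSet::new();
    seen.insert(u32x4::new(1, 2, 3, 4));
    assert!(seen.contains(&u32x4::new(1, 2, 3, 4)));
    assert!(!seen.contains(&u32x4::splat(0)));
}
```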
\ No newline at end of file
diff --git a/src/packed_simd/api/cmp/ord.rs.html b/src/packed_simd/api/cmp/ord.rs.html
new file mode 100644
index 000000000..89be885db
--- /dev/null
+++ b/src/packed_simd/api/cmp/ord.rs.html
@@ -0,0 +1,89 @@
+ord.rs.html -- source
+//! Implements `Ord` for vector types.
+
+macro_rules! impl_cmp_ord {
+    (
+        [$elem_ty:ident; $elem_count:expr]:
+        $id:ident | $test_tt:tt |
+        ($true:expr, $false:expr)
+    ) => {
+        impl $id {
+            /// Returns a wrapper that implements `Ord`.
+            #[inline]
+            pub fn lex_ord(&self) -> LexicographicallyOrdered<$id> {
+                LexicographicallyOrdered(*self)
+            }
+        }
+
+        impl crate::cmp::Ord for LexicographicallyOrdered<$id> {
+            #[inline]
+            fn cmp(&self, other: &Self) -> crate::cmp::Ordering {
+                match self.partial_cmp(other) {
+                    Some(x) => x,
+                    None => unsafe { crate::hint::unreachable_unchecked() },
+                }
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _cmp_ord>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn eq() {
+                        fn foo<E: crate::cmp::Ord>(_: E) {}
+                        let a = $id::splat($false);
+                        foo(a.partial_lex_ord());
+                        foo(a.lex_ord());
+                    }
+                }
+            }
+        }
+    };
+}
+
+
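Because `cmp` on the wrapper delegates to `partial_cmp` and treats `None` as unreachable, `lex_ord()` is only a sound total order for vectors whose lanes are themselves totally ordered (integers and masks, not floats). A usage sketch, assuming the published `packed_simd` crate:

```rust
use packed_simd::u32x4;

fn main() {
    let a = u32x4::new(0, 9, 9, 9);
    let b = u32x4::new(1, 0, 0, 0);
    // Lexicographic: the first differing lane decides (lane 0: 0 < 1).
    assert!(a.lex_ord() < b.lex_ord());
    assert_eq!(a.lex_ord().cmp(&a.lex_ord()), std::cmp::Ordering::Equal);
}
```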
\ No newline at end of file
diff --git a/src/packed_simd/api/cmp/partial_eq.rs.html b/src/packed_simd/api/cmp/partial_eq.rs.html
new file mode 100644
index 000000000..e427f3654
--- /dev/null
+++ b/src/packed_simd/api/cmp/partial_eq.rs.html
@@ -0,0 +1,137 @@
+partial_eq.rs.html -- source
+//! Implements `PartialEq` for vector types.
+
+macro_rules! impl_cmp_partial_eq {
+    (
+        [$elem_ty:ident; $elem_count:expr]:
+        $id:ident | $test_tt:tt |
+        ($true:expr, $false:expr)
+    ) => {
+        // FIXME: https://github.com/rust-lang-nursery/rust-clippy/issues/2892
+        #[allow(clippy::partialeq_ne_impl)]
+        impl crate::cmp::PartialEq<$id> for $id {
+            #[inline]
+            fn eq(&self, other: &Self) -> bool {
+                $id::eq(*self, *other).all()
+            }
+            #[inline]
+            fn ne(&self, other: &Self) -> bool {
+                $id::ne(*self, *other).any()
+            }
+        }
+
+        // FIXME: https://github.com/rust-lang-nursery/rust-clippy/issues/2892
+        #[allow(clippy::partialeq_ne_impl)]
+        impl crate::cmp::PartialEq<LexicographicallyOrdered<$id>>
+            for LexicographicallyOrdered<$id>
+        {
+            #[inline]
+            fn eq(&self, other: &Self) -> bool {
+                self.0 == other.0
+            }
+            #[inline]
+            fn ne(&self, other: &Self) -> bool {
+                self.0 != other.0
+            }
+        }
+
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _cmp_PartialEq>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn partial_eq() {
+                        let a = $id::splat($false);
+                        let b = $id::splat($true);
+
+                        assert!(a != b);
+                        assert!(!(a == b));
+                        assert!(a == a);
+                        assert!(!(a != a));
+
+                        if $id::lanes() > 1 {
+                            let a = $id::splat($false).replace(0, $true);
+                            let b = $id::splat($true);
+
+                            assert!(a != b);
+                            assert!(!(a == b));
+                            assert!(a == a);
+                            assert!(!(a != a));
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+
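Note the deliberate asymmetry encoded above: `==` reduces the lane-wise comparison with `.all()`, while `!=` reduces with `.any()`, which keeps the two operators exact logical negations of each other. For example (assuming the published `packed_simd` crate):

```rust
use packed_simd::i32x4;

fn main() {
    let a = i32x4::new(1, 2, 3, 4);
    let b = i32x4::new(1, 2, 3, 5);
    // One differing lane is enough for `!=`, and defeats `==`:
    assert!(a != b);
    assert!(!(a == b));
    assert!(a == a);
}
```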
\ No newline at end of file
diff --git a/src/packed_simd/api/cmp/partial_ord.rs.html b/src/packed_simd/api/cmp/partial_ord.rs.html
new file mode 100644
index 000000000..319ed66b9
--- /dev/null
+++ b/src/packed_simd/api/cmp/partial_ord.rs.html
@@ -0,0 +1,471 @@
+partial_ord.rs.html -- source
+//! Implements `PartialOrd` for vector types.
+//!
+//! This implements a lexicographical order.
+
+macro_rules! impl_cmp_partial_ord {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Returns a wrapper that implements `PartialOrd`.
+            #[inline]
+            pub fn partial_lex_ord(&self) -> LexicographicallyOrdered<$id> {
+                LexicographicallyOrdered(*self)
+            }
+        }
+
+        impl crate::cmp::PartialOrd<LexicographicallyOrdered<$id>>
+            for LexicographicallyOrdered<$id>
+        {
+            #[inline]
+            fn partial_cmp(
+                &self, other: &Self,
+            ) -> Option<crate::cmp::Ordering> {
+                if PartialEq::eq(self, other) {
+                    Some(crate::cmp::Ordering::Equal)
+                } else if PartialOrd::lt(self, other) {
+                    Some(crate::cmp::Ordering::Less)
+                } else if PartialOrd::gt(self, other) {
+                    Some(crate::cmp::Ordering::Greater)
+                } else {
+                    None
+                }
+            }
+            #[inline]
+            fn lt(&self, other: &Self) -> bool {
+                let m_lt = self.0.lt(other.0);
+                let m_eq = self.0.eq(other.0);
+                for i in 0..$id::lanes() {
+                    if m_eq.extract(i) {
+                        continue;
+                    }
+                    return m_lt.extract(i);
+                }
+                false
+            }
+            #[inline]
+            fn le(&self, other: &Self) -> bool {
+                self.lt(other) | PartialEq::eq(self, other)
+            }
+            #[inline]
+            fn ge(&self, other: &Self) -> bool {
+                self.gt(other) | PartialEq::eq(self, other)
+            }
+            #[inline]
+            fn gt(&self, other: &Self) -> bool {
+                let m_gt = self.0.gt(other.0);
+                let m_eq = self.0.eq(other.0);
+                for i in 0..$id::lanes() {
+                    if m_eq.extract(i) {
+                        continue;
+                    }
+                    return m_gt.extract(i);
+                }
+                false
+            }
+        }
+    };
+}
+
+macro_rules! test_cmp_partial_ord_int {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _cmp_PartialOrd>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn partial_lex_ord() {
+                        use crate::testing::utils::{test_cmp};
+                        // constant values
+                        let a = $id::splat(0);
+                        let b = $id::splat(1);
+
+                        test_cmp(a.partial_lex_ord(), b.partial_lex_ord(),
+                                 Some(crate::cmp::Ordering::Less));
+                        test_cmp(b.partial_lex_ord(), a.partial_lex_ord(),
+                                 Some(crate::cmp::Ordering::Greater));
+                        test_cmp(a.partial_lex_ord(), a.partial_lex_ord(),
+                                 Some(crate::cmp::Ordering::Equal));
+                        test_cmp(b.partial_lex_ord(), b.partial_lex_ord(),
+                                 Some(crate::cmp::Ordering::Equal));
+
+                        // variable values: a = [0, 1, 2, 3]; b = [3, 2, 1, 0]
+                        let mut a = $id::splat(0);
+                        let mut b = $id::splat(0);
+                        for i in 0..$id::lanes() {
+                            a = a.replace(i, i as $elem_ty);
+                            b = b.replace(i, ($id::lanes() - i) as $elem_ty);
+                        }
+                        test_cmp(a.partial_lex_ord(), b.partial_lex_ord(),
+                                 Some(crate::cmp::Ordering::Less));
+                        test_cmp(b.partial_lex_ord(), a.partial_lex_ord(),
+                                 Some(crate::cmp::Ordering::Greater));
+                        test_cmp(a.partial_lex_ord(), a.partial_lex_ord(),
+                                 Some(crate::cmp::Ordering::Equal));
+                        test_cmp(b.partial_lex_ord(), b.partial_lex_ord(),
+                                 Some(crate::cmp::Ordering::Equal));
+
+                        // variable values: a = [0, 1, 2, 3]; b = [0, 1, 2, 4]
+                        let mut b = a;
+                        b = b.replace(
+                            $id::lanes() - 1,
+                            a.extract($id::lanes() - 1) + 1 as $elem_ty
+                        );
+                        test_cmp(a.partial_lex_ord(), b.partial_lex_ord(),
+                                 Some(crate::cmp::Ordering::Less));
+                        test_cmp(b.partial_lex_ord(), a.partial_lex_ord(),
+                                 Some(crate::cmp::Ordering::Greater));
+                        test_cmp(a.partial_lex_ord(), a.partial_lex_ord(),
+                                 Some(crate::cmp::Ordering::Equal));
+                        test_cmp(b.partial_lex_ord(), b.partial_lex_ord(),
+                                 Some(crate::cmp::Ordering::Equal));
+
+                        if $id::lanes() > 2 {
+                            // variable values a = [0, 1, 0, 0]; b = [0, 1, 2, 3]
+                            let b = a;
+                            let mut a = $id::splat(0);
+                            a = a.replace(1, 1 as $elem_ty);
+                            test_cmp(a.partial_lex_ord(), b.partial_lex_ord(),
+                                     Some(crate::cmp::Ordering::Less));
+                            test_cmp(b.partial_lex_ord(), a.partial_lex_ord(),
+                                     Some(crate::cmp::Ordering::Greater));
+                            test_cmp(a.partial_lex_ord(), a.partial_lex_ord(),
+                                     Some(crate::cmp::Ordering::Equal));
+                            test_cmp(b.partial_lex_ord(), b.partial_lex_ord(),
+                                     Some(crate::cmp::Ordering::Equal));
+
+                            // variable values: a = [0, 1, 2, 3]; b = [0, 1, 3, 2]
+                            let mut b = a;
+                            b = b.replace(
+                                2, a.extract($id::lanes() - 1) + 1 as $elem_ty
+                            );
+                            test_cmp(a.partial_lex_ord(), b.partial_lex_ord(),
+                                     Some(crate::cmp::Ordering::Less));
+                            test_cmp(b.partial_lex_ord(), a.partial_lex_ord(),
+                                     Some(crate::cmp::Ordering::Greater));
+                            test_cmp(a.partial_lex_ord(), a.partial_lex_ord(),
+                                     Some(crate::cmp::Ordering::Equal));
+                            test_cmp(b.partial_lex_ord(), b.partial_lex_ord(),
+                                     Some(crate::cmp::Ordering::Equal));
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+macro_rules! test_cmp_partial_ord_mask {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _cmp_PartialOrd>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn partial_lex_ord() {
+                        use crate::testing::utils::{test_cmp};
+                        use crate::cmp::Ordering;
+
+                        // constant values
+                        let a = $id::splat(false);
+                        let b = $id::splat(true);
+
+                        test_cmp(a.partial_lex_ord(), b.partial_lex_ord(),
+                                 Some(Ordering::Less));
+                        test_cmp(b.partial_lex_ord(), a.partial_lex_ord(),
+                                 Some(Ordering::Greater));
+                        test_cmp(a.partial_lex_ord(), a.partial_lex_ord(),
+                                 Some(Ordering::Equal));
+                        test_cmp(b.partial_lex_ord(), b.partial_lex_ord(),
+                                 Some(Ordering::Equal));
+
+                        // variable values:
+                        // a = [false, false, false, false];
+                        // b = [false, false, false, true]
+                        let a = $id::splat(false);
+                        let mut b = $id::splat(false);
+                        b = b.replace($id::lanes() - 1, true);
+                        test_cmp(a.partial_lex_ord(), b.partial_lex_ord(),
+                                 Some(Ordering::Less));
+                        test_cmp(b.partial_lex_ord(), a.partial_lex_ord(),
+                                 Some(Ordering::Greater));
+                        test_cmp(a.partial_lex_ord(), a.partial_lex_ord(),
+                                 Some(Ordering::Equal));
+                        test_cmp(b.partial_lex_ord(), b.partial_lex_ord(),
+                                 Some(Ordering::Equal));
+
+                        // variable values:
+                        // a = [true, true, true, false];
+                        // b = [true, true, true, true]
+                        let mut a = $id::splat(true);
+                        let b = $id::splat(true);
+                        a = a.replace($id::lanes() - 1, false);
+                        test_cmp(a.partial_lex_ord(), b.partial_lex_ord(),
+                                 Some(Ordering::Less));
+                        test_cmp(b.partial_lex_ord(), a.partial_lex_ord(),
+                                 Some(Ordering::Greater));
+                        test_cmp(a.partial_lex_ord(), a.partial_lex_ord(),
+                                 Some(Ordering::Equal));
+                        test_cmp(b.partial_lex_ord(), b.partial_lex_ord(),
+                                 Some(Ordering::Equal));
+
+                        if $id::lanes() > 2 {
+                            // variable values
+                            // a = [false, true, false, false];
+                            // b = [false, true, true, true]
+                            let mut a = $id::splat(false);
+                            let mut b = $id::splat(true);
+                            a = a.replace(1, true);
+                            b = b.replace(0, false);
+                            test_cmp(a.partial_lex_ord(), b.partial_lex_ord(),
+                                     Some(Ordering::Less));
+                            test_cmp(b.partial_lex_ord(), a.partial_lex_ord(),
+                                     Some(Ordering::Greater));
+                            test_cmp(a.partial_lex_ord(), a.partial_lex_ord(),
+                                     Some(Ordering::Equal));
+                            test_cmp(b.partial_lex_ord(), b.partial_lex_ord(),
+                                     Some(Ordering::Equal));
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+
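The lane-by-lane scan in `lt`/`gt` above returns a decision from the first non-equal lane, so `partial_cmp` yields `None` exactly when that deciding lane is itself unordered; for float vectors that means a `NaN` lane. A sketch of that edge case, assuming the published `packed_simd` crate:

```rust
use packed_simd::f32x4;
use std::cmp::Ordering;

fn main() {
    let a = f32x4::new(0.0, 1.0, 2.0, 3.0);
    let n = f32x4::new(0.0, std::f32::NAN, 2.0, 3.0);
    assert_eq!(
        a.partial_lex_ord().partial_cmp(&a.partial_lex_ord()),
        Some(Ordering::Equal)
    );
    // Lane 1 is the first non-equal lane and is unordered, so
    // the vectors have no lexicographic ordering at all:
    assert_eq!(a.partial_lex_ord().partial_cmp(&n.partial_lex_ord()), None);
}
```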
\ No newline at end of file
diff --git a/src/packed_simd/api/cmp/vertical.rs.html b/src/packed_simd/api/cmp/vertical.rs.html
new file mode 100644
index 000000000..bacad11d3
--- /dev/null
+++ b/src/packed_simd/api/cmp/vertical.rs.html
@@ -0,0 +1,231 @@
+vertical.rs.html -- source
+//! Vertical (lane-wise) vector comparisons returning vector masks.
+
+macro_rules! impl_cmp_vertical {
+    (
+        [$elem_ty:ident; $elem_count:expr]:
+        $id:ident,
+        $mask_ty:ident,
+        $is_mask:expr,($true:expr, $false:expr) | $test_tt:tt
+    ) => {
+        impl $id {
+            /// Lane-wise equality comparison.
+            #[inline]
+            pub fn eq(self, other: Self) -> $mask_ty {
+                use crate::llvm::simd_eq;
+                Simd(unsafe { simd_eq(self.0, other.0) })
+            }
+
+            /// Lane-wise inequality comparison.
+            #[inline]
+            pub fn ne(self, other: Self) -> $mask_ty {
+                use crate::llvm::simd_ne;
+                Simd(unsafe { simd_ne(self.0, other.0) })
+            }
+
+            /// Lane-wise less-than comparison.
+            #[inline]
+            pub fn lt(self, other: Self) -> $mask_ty {
+                use crate::llvm::{simd_gt, simd_lt};
+                if $is_mask {
+                    Simd(unsafe { simd_gt(self.0, other.0) })
+                } else {
+                    Simd(unsafe { simd_lt(self.0, other.0) })
+                }
+            }
+
+            /// Lane-wise less-than-or-equals comparison.
+            #[inline]
+            pub fn le(self, other: Self) -> $mask_ty {
+                use crate::llvm::{simd_ge, simd_le};
+                if $is_mask {
+                    Simd(unsafe { simd_ge(self.0, other.0) })
+                } else {
+                    Simd(unsafe { simd_le(self.0, other.0) })
+                }
+            }
+
+            /// Lane-wise greater-than comparison.
+            #[inline]
+            pub fn gt(self, other: Self) -> $mask_ty {
+                use crate::llvm::{simd_gt, simd_lt};
+                if $is_mask {
+                    Simd(unsafe { simd_lt(self.0, other.0) })
+                } else {
+                    Simd(unsafe { simd_gt(self.0, other.0) })
+                }
+            }
+
+            /// Lane-wise greater-than-or-equals comparison.
+            #[inline]
+            pub fn ge(self, other: Self) -> $mask_ty {
+                use crate::llvm::{simd_ge, simd_le};
+                if $is_mask {
+                    Simd(unsafe { simd_le(self.0, other.0) })
+                } else {
+                    Simd(unsafe { simd_ge(self.0, other.0) })
+                }
+            }
+        }
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _cmp_vertical>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn cmp() {
+                        let a = $id::splat($false);
+                        let b = $id::splat($true);
+
+                        let r = a.lt(b);
+                        let e = $mask_ty::splat(true);
+                        assert!(r == e);
+                        let r = a.le(b);
+                        assert!(r == e);
+
+                        let e = $mask_ty::splat(false);
+                        let r = a.gt(b);
+                        assert!(r == e);
+                        let r = a.ge(b);
+                        assert!(r == e);
+                        let r = a.eq(b);
+                        assert!(r == e);
+
+                        let mut a = a;
+                        let mut b = b;
+                        let mut e = e;
+                        for i in 0..$id::lanes() {
+                            if i % 2 == 0 {
+                                a = a.replace(i, $false);
+                                b = b.replace(i, $true);
+                                e = e.replace(i, true);
+                            } else {
+                                a = a.replace(i, $true);
+                                b = b.replace(i, $false);
+                                e = e.replace(i, false);
+                            }
+                        }
+                        let r = a.lt(b);
+                        assert!(r == e);
+                    }
+                }
+            }
+        }
+    };
+}
+
+
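Unlike the `PartialEq`/`PartialOrd` impls, these vertical comparisons return a full mask vector, one boolean per lane, which can then be reduced (`all`/`any`) or used to blend two vectors. The `$is_mask` branches swap the intrinsics because `true` mask lanes are stored as all-ones (numerically -1), which would otherwise invert the order. A sketch, assuming the published `packed_simd` crate:

```rust
use packed_simd::{f32x4, m32x4};

fn main() {
    let a = f32x4::new(1.0, 2.0, 3.0, 4.0);
    let b = f32x4::new(4.0, 3.0, 2.0, 1.0);
    let m: m32x4 = a.lt(b); // lane-wise: [true, true, false, false]
    assert_eq!(m, m32x4::new(true, true, false, false));
    assert!(m.any() && !m.all());
    // The mask blends lane-wise, here computing an element-wise minimum:
    assert_eq!(m.select(a, b), f32x4::new(1.0, 2.0, 2.0, 1.0));
}
```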
\ No newline at end of file
diff --git a/src/packed_simd/api/default.rs.html b/src/packed_simd/api/default.rs.html
new file mode 100644
index 000000000..2685d78b3
--- /dev/null
+++ b/src/packed_simd/api/default.rs.html
@@ -0,0 +1,59 @@
+default.rs.html -- source
+//! Implements `Default` for vector types.
+
+macro_rules! impl_default {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl Default for $id {
+            #[inline]
+            fn default() -> Self {
+                Self::splat($elem_ty::default())
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _default>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn default() {
+                        let a = $id::default();
+                        for i in 0..$id::lanes() {
+                            assert_eq!(a.extract(i), $elem_ty::default());
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+
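`Default` simply splats the element type's default into every lane, so (assuming the published `packed_simd` crate):

```rust
use packed_simd::{f32x4, i32x4};

fn main() {
    // Every lane holds the element type's `default()`:
    assert_eq!(i32x4::default(), i32x4::splat(0));
    assert_eq!(f32x4::default(), f32x4::splat(0.0));
}
```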
\ No newline at end of file
diff --git a/src/packed_simd/api/fmt.rs.html b/src/packed_simd/api/fmt.rs.html
new file mode 100644
index 000000000..bebcda294
--- /dev/null
+++ b/src/packed_simd/api/fmt.rs.html
@@ -0,0 +1,27 @@
+fmt.rs.html -- source
+//! Implements formatting APIs
+
+#[macro_use]
+mod debug;
+#[macro_use]
+mod lower_hex;
+#[macro_use]
+mod upper_hex;
+#[macro_use]
+mod octal;
+#[macro_use]
+mod binary;
+
+
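All five formatting impls that follow share the same shape: print the type name, then each lane through the caller's formatter, so flags like `#` propagate to every lane. Expected output, sketched against the published `packed_simd` crate:

```rust
use packed_simd::u8x4;

fn main() {
    let v = u8x4::new(1, 2, 3, 255);
    assert_eq!(format!("{:?}", v), "u8x4(1, 2, 3, 255)");
    // The alternate flag is forwarded lane-wise:
    assert_eq!(format!("{:#x}", v), "u8x4(0x1, 0x2, 0x3, 0xff)");
    assert_eq!(format!("{:#b}", v), "u8x4(0b1, 0b10, 0b11, 0b11111111)");
}
```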
\ No newline at end of file
diff --git a/src/packed_simd/api/fmt/binary.rs.html b/src/packed_simd/api/fmt/binary.rs.html
new file mode 100644
index 000000000..5cd20c4a8
--- /dev/null
+++ b/src/packed_simd/api/fmt/binary.rs.html
@@ -0,0 +1,115 @@
+binary.rs.html -- source
+//! Implement `Binary` formatting
+
+macro_rules! impl_fmt_binary {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl crate::fmt::Binary for $id {
+            #[allow(clippy::missing_inline_in_public_items)]
+            fn fmt(
+                &self, f: &mut crate::fmt::Formatter<'_>,
+            ) -> crate::fmt::Result {
+                write!(f, "{}(", stringify!($id))?;
+                for i in 0..$elem_count {
+                    if i > 0 {
+                        write!(f, ", ")?;
+                    }
+                    self.extract(i).fmt(f)?;
+                }
+                write!(f, ")")
+            }
+        }
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _fmt_binary>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn binary() {
+                        use arrayvec::{ArrayString,ArrayVec};
+                        type TinyString = ArrayString<[u8; 512]>;
+
+                        use crate::fmt::Write;
+                        let v = $id::splat($elem_ty::default());
+                        let mut s = TinyString::new();
+                        write!(&mut s, "{:#b}", v).unwrap();
+
+                        let mut beg = TinyString::new();
+                        write!(&mut beg, "{}(", stringify!($id)).unwrap();
+                        assert!(s.starts_with(beg.as_str()));
+                        assert!(s.ends_with(")"));
+                        let s: ArrayVec<[TinyString; 64]>
+                            = s.replace(beg.as_str(), "")
+                            .replace(")", "").split(",")
+                            .map(|v| TinyString::from(v.trim()).unwrap())
+                            .collect();
+                        assert_eq!(s.len(), $id::lanes());
+                        for (index, ss) in s.into_iter().enumerate() {
+                            let mut e = TinyString::new();
+                            write!(&mut e, "{:#b}", v.extract(index)).unwrap();
+                            assert_eq!(ss, e);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/fmt/debug.rs.html b/src/packed_simd/api/fmt/debug.rs.html
new file mode 100644
index 000000000..e4d9488f1
--- /dev/null
+++ b/src/packed_simd/api/fmt/debug.rs.html
@@ -0,0 +1,127 @@
+debug.rs.html -- source
+//! Implement debug formatting
+
+macro_rules! impl_fmt_debug_tests {
+    ([$elem_ty:ty; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _fmt_debug>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn debug() {
+                        use arrayvec::{ArrayString,ArrayVec};
+                        type TinyString = ArrayString<[u8; 512]>;
+
+                        use crate::fmt::Write;
+                        let v = $id::default();
+                        let mut s = TinyString::new();
+                        write!(&mut s, "{:?}", v).unwrap();
+
+                        let mut beg = TinyString::new();
+                        write!(&mut beg, "{}(", stringify!($id)).unwrap();
+                        assert!(s.starts_with(beg.as_str()));
+                        assert!(s.ends_with(")"));
+                        let s: ArrayVec<[TinyString; 64]>
+                            = s.replace(beg.as_str(), "")
+                            .replace(")", "").split(",")
+                            .map(|v| TinyString::from(v.trim()).unwrap())
+                            .collect();
+                        assert_eq!(s.len(), $id::lanes());
+                        for (index, ss) in s.into_iter().enumerate() {
+                            let mut e = TinyString::new();
+                            write!(&mut e, "{:?}", v.extract(index)).unwrap();
+                            assert_eq!(ss, e);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+macro_rules! impl_fmt_debug {
+    ([$elem_ty:ty; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl crate::fmt::Debug for $id {
+            #[allow(clippy::missing_inline_in_public_items)]
+            fn fmt(
+                &self, f: &mut crate::fmt::Formatter<'_>,
+            ) -> crate::fmt::Result {
+                write!(f, "{}(", stringify!($id))?;
+                for i in 0..$elem_count {
+                    if i > 0 {
+                        write!(f, ", ")?;
+                    }
+                    self.extract(i).fmt(f)?;
+                }
+                write!(f, ")")
+            }
+        }
+        impl_fmt_debug_tests!([$elem_ty; $elem_count]: $id | $test_tt);
+    };
+}
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/fmt/lower_hex.rs.html b/src/packed_simd/api/fmt/lower_hex.rs.html
new file mode 100644
index 000000000..973f7d3c8
--- /dev/null
+++ b/src/packed_simd/api/fmt/lower_hex.rs.html
@@ -0,0 +1,115 @@
+lower_hex.rs.html -- source
+//! Implement `LowerHex` formatting
+
+macro_rules! impl_fmt_lower_hex {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl crate::fmt::LowerHex for $id {
+            #[allow(clippy::missing_inline_in_public_items)]
+            fn fmt(
+                &self, f: &mut crate::fmt::Formatter<'_>,
+            ) -> crate::fmt::Result {
+                write!(f, "{}(", stringify!($id))?;
+                for i in 0..$elem_count {
+                    if i > 0 {
+                        write!(f, ", ")?;
+                    }
+                    self.extract(i).fmt(f)?;
+                }
+                write!(f, ")")
+            }
+        }
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _fmt_lower_hex>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn lower_hex() {
+                        use arrayvec::{ArrayString,ArrayVec};
+                        type TinyString = ArrayString<[u8; 512]>;
+
+                        use crate::fmt::Write;
+                        let v = $id::splat($elem_ty::default());
+                        let mut s = TinyString::new();
+                        write!(&mut s, "{:#x}", v).unwrap();
+
+                        let mut beg = TinyString::new();
+                        write!(&mut beg, "{}(", stringify!($id)).unwrap();
+                        assert!(s.starts_with(beg.as_str()));
+                        assert!(s.ends_with(")"));
+                        let s: ArrayVec<[TinyString; 64]>
+                            = s.replace(beg.as_str(), "").replace(")", "")
+                            .split(",")
+                            .map(|v| TinyString::from(v.trim()).unwrap())
+                            .collect();
+                        assert_eq!(s.len(), $id::lanes());
+                        for (index, ss) in s.into_iter().enumerate() {
+                            let mut e = TinyString::new();
+                            write!(&mut e, "{:#x}", v.extract(index)).unwrap();
+                            assert_eq!(ss, e);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/fmt/octal.rs.html b/src/packed_simd/api/fmt/octal.rs.html
new file mode 100644
index 000000000..952e0ccba
--- /dev/null
+++ b/src/packed_simd/api/fmt/octal.rs.html
@@ -0,0 +1,115 @@
+octal.rs.html -- source
+//! Implement `Octal` formatting
+
+macro_rules! impl_fmt_octal {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl crate::fmt::Octal for $id {
+            #[allow(clippy::missing_inline_in_public_items)]
+            fn fmt(
+                &self, f: &mut crate::fmt::Formatter<'_>,
+            ) -> crate::fmt::Result {
+                write!(f, "{}(", stringify!($id))?;
+                for i in 0..$elem_count {
+                    if i > 0 {
+                        write!(f, ", ")?;
+                    }
+                    self.extract(i).fmt(f)?;
+                }
+                write!(f, ")")
+            }
+        }
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _fmt_octal>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn octal() {
+                        use arrayvec::{ArrayString,ArrayVec};
+                        type TinyString = ArrayString<[u8; 512]>;
+
+                        use crate::fmt::Write;
+                        let v = $id::splat($elem_ty::default());
+                        let mut s = TinyString::new();
+                        write!(&mut s, "{:#o}", v).unwrap();
+
+                        let mut beg = TinyString::new();
+                        write!(&mut beg, "{}(", stringify!($id)).unwrap();
+                        assert!(s.starts_with(beg.as_str()));
+                        assert!(s.ends_with(")"));
+                        let s: ArrayVec<[TinyString; 64]>
+                            = s.replace(beg.as_str(), "").replace(")", "")
+                            .split(",")
+                            .map(|v| TinyString::from(v.trim()).unwrap())
+                            .collect();
+                        assert_eq!(s.len(), $id::lanes());
+                        for (index, ss) in s.into_iter().enumerate() {
+                            let mut e = TinyString::new();
+                            write!(&mut e, "{:#o}", v.extract(index)).unwrap();
+                            assert_eq!(ss, e);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/fmt/upper_hex.rs.html b/src/packed_simd/api/fmt/upper_hex.rs.html
new file mode 100644
index 000000000..2bf323ca8
--- /dev/null
+++ b/src/packed_simd/api/fmt/upper_hex.rs.html
@@ -0,0 +1,115 @@
+upper_hex.rs.html -- source
+//! Implement `UpperHex` formatting
+
+macro_rules! impl_fmt_upper_hex {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl crate::fmt::UpperHex for $id {
+            #[allow(clippy::missing_inline_in_public_items)]
+            fn fmt(
+                &self, f: &mut crate::fmt::Formatter<'_>,
+            ) -> crate::fmt::Result {
+                write!(f, "{}(", stringify!($id))?;
+                for i in 0..$elem_count {
+                    if i > 0 {
+                        write!(f, ", ")?;
+                    }
+                    self.extract(i).fmt(f)?;
+                }
+                write!(f, ")")
+            }
+        }
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _fmt_upper_hex>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn upper_hex() {
+                        use arrayvec::{ArrayString,ArrayVec};
+                        type TinyString = ArrayString<[u8; 512]>;
+
+                        use crate::fmt::Write;
+                        let v = $id::splat($elem_ty::default());
+                        let mut s = TinyString::new();
+                        write!(&mut s, "{:#X}", v).unwrap();
+
+                        let mut beg = TinyString::new();
+                        write!(&mut beg, "{}(", stringify!($id)).unwrap();
+                        assert!(s.starts_with(beg.as_str()));
+                        assert!(s.ends_with(")"));
+                        let s: ArrayVec<[TinyString; 64]>
+                            = s.replace(beg.as_str(), "").replace(")", "")
+                            .split(",")
+                            .map(|v| TinyString::from(v.trim()).unwrap())
+                            .collect();
+                        assert_eq!(s.len(), $id::lanes());
+                        for (index, ss) in s.into_iter().enumerate() {
+                            let mut e = TinyString::new();
+                            write!(&mut e, "{:#X}", v.extract(index)).unwrap();
+                            assert_eq!(ss, e);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/from.rs.html b/src/packed_simd/api/from.rs.html
new file mode 100644
index 000000000..f0abe9994
--- /dev/null
+++ b/src/packed_simd/api/from.rs.html
@@ -0,0 +1,17 @@
+from.rs.html -- source
+//! Implementations of the `From` and `Into` traits
+
+#[macro_use]
+mod from_array;
+
+#[macro_use]
+mod from_vector;
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/from/from_array.rs.html b/src/packed_simd/api/from/from_array.rs.html
new file mode 100644
index 000000000..bea105543
--- /dev/null
+++ b/src/packed_simd/api/from/from_array.rs.html
@@ -0,0 +1,245 @@
+from_array.rs.html -- source
+//! Implements `From<[T; N]>` and `Into<[T; N]>` for vector types.
+
+macro_rules! impl_from_array {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt
+     | ($non_default_array:expr, $non_default_vec:expr)) => {
+        impl From<[$elem_ty; $elem_count]> for $id {
+            #[inline]
+            fn from(array: [$elem_ty; $elem_count]) -> Self {
+                union U {
+                    array: [$elem_ty; $elem_count],
+                    vec: $id,
+                }
+                unsafe { U { array }.vec }
+            }
+        }
+
+        impl From<$id> for [$elem_ty; $elem_count] {
+            #[inline]
+            fn from(vec: $id) -> Self {
+                union U {
+                    array: [$elem_ty; $elem_count],
+                    vec: $id,
+                }
+                unsafe { U { vec }.array }
+            }
+        }
+
+        // FIXME: `Into::into` is not inline, but due to
+        // the blanket impl in `std`, which is not
+        // marked `default`, we cannot override it here with
+        // specialization.
+        /*
+        impl Into<[$elem_ty; $elem_count]> for $id {
+            #[inline]
+            fn into(self) -> [$elem_ty; $elem_count] {
+                union U {
+                    array: [$elem_ty; $elem_count],
+                    vec: $id,
+                }
+                unsafe { U { vec: self }.array }
+            }
+        }
+
+        impl Into<$id> for [$elem_ty; $elem_count] {
+            #[inline]
+            fn into(self) -> $id {
+                union U {
+                    array: [$elem_ty; $elem_count],
+                    vec: $id,
+                }
+                unsafe { U { array: self }.vec }
+            }
+        }
+        */
+
+        test_if! {
+            $test_tt:
+            paste::item! {
+                mod [<$id _from>] {
+                    use super::*;
+                    #[test]
+                    fn array() {
+                        let vec: $id = Default::default();
+
+                        // FIXME: Workaround for arrays with more than 32
+                        // elements.
+                        //
+                        // Safe because we never take a reference to any
+                        // uninitialized element.
+                        union W {
+                            array: [$elem_ty; $elem_count],
+                            other: ()
+                        }
+                        let mut array = W { other: () };
+                        for i in 0..$elem_count {
+                            let default: $elem_ty = Default::default();
+                            // note: array.other is the active member and
+                            // initialized so we can take a reference to it:
+                            let p = unsafe {
+                                &mut array.other as *mut () as *mut $elem_ty
+                            };
+                            // note: default is a valid bit-pattern for
+                            // $elem_ty:
+                            unsafe {
+                                crate::ptr::write(p.wrapping_add(i), default)
+                            };
+                        }
+                        // note: the array variant of the union is properly
+                        // initialized:
+                        let mut array = unsafe {
+                            array.array
+                        };
+
+                        array[0] = $non_default_array;
+                        let vec = vec.replace(0, $non_default_vec);
+
+                        let vec_from_array = $id::from(array);
+                        assert_eq!(vec_from_array, vec);
+                        let array_from_vec
+                            = <[$elem_ty; $elem_count]>::from(vec);
+                        // FIXME: Workaround for arrays with more than 32
+                        // elements.
+                        for i in 0..$elem_count {
+                            assert_eq!(array_from_vec[i], array[i]);
+                        }
+
+                        let vec_from_into_array: $id = array.into();
+                        assert_eq!(vec_from_into_array, vec);
+                        let array_from_into_vec: [$elem_ty; $elem_count]
+                            = vec.into();
+                        // FIXME: Workaround for arrays with more than 32
+                        // elements.
+                        for i in 0..$elem_count {
+                            assert_eq!(array_from_into_vec[i], array[i]);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+
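The union-based transmute above is sound because a vector and its array form have the same size and bit layout; the union dance in the test only exists because, before const generics, arrays longer than 32 elements lacked the standard trait impls. Round-tripping is then straightforward (assuming the published `packed_simd` crate):

```rust
use packed_simd::u32x4;

fn main() {
    // Array -> vector:
    let v = u32x4::from([1, 2, 3, 4]);
    assert_eq!(v.extract(3), 4);
    // Vector -> array:
    let a: [u32; 4] = v.into();
    assert_eq!(a, [1, 2, 3, 4]);
}
```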
\ No newline at end of file
diff --git a/src/packed_simd/api/from/from_vector.rs.html b/src/packed_simd/api/from/from_vector.rs.html
new file mode 100644
index 000000000..584be95a0
--- /dev/null
+++ b/src/packed_simd/api/from/from_vector.rs.html
@@ -0,0 +1,137 @@
+from_vector.rs.html -- source
+//! Implements `From` and `Into` for vector types.
+
+macro_rules! impl_from_vector {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt
+     | $source:ident) => {
+        impl From<$source> for $id {
+            #[inline]
+            fn from(source: $source) -> Self {
+                fn static_assert_same_number_of_lanes<T, U>()
+                where
+                    T: crate::sealed::Simd,
+                    U: crate::sealed::Simd<LanesType = T::LanesType>,
+                {
+                }
+                use crate::llvm::simd_cast;
+                static_assert_same_number_of_lanes::<$id, $source>();
+                Simd(unsafe { simd_cast(source.0) })
+            }
+        }
+
+        // FIXME: `Into::into` is not inline, but due to the blanket impl in
+        // `std`, which is not marked `default`, we cannot override it here
+        // with specialization.
+
+        /*
+           impl Into<$id> for $source {
+               #[inline]
+               fn into(self) -> $id {
+                   unsafe { simd_cast(self) }
+               }
+           }
+        */
+
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _from_ $source>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn from() {
+                        assert_eq!($id::lanes(), $source::lanes());
+                        let source: $source = Default::default();
+                        let vec: $id = Default::default();
+
+                        let e = $id::from(source);
+                        assert_eq!(e, vec);
+
+                        let e: $id = source.into();
+                        assert_eq!(e, vec);
+                    }
+                }
+            }
+        }
+    };
+}
+
+macro_rules! impl_from_vectors {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt
+     | $($source:ident),*) => {
+        $(
+            impl_from_vector!(
+                [$elem_ty; $elem_count]: $id | $test_tt | $source
+            );
+        )*
+    }
+}
+
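A minimal usage sketch of the conversions this macro generates (assuming the
portable `i32x4` and `f32x4` types, which have the same number of lanes; the
conversion is a lane-wise cast):

    use packed_simd::{f32x4, i32x4};

    fn demo() {
        let i = i32x4::new(1, 2, 3, 4);
        // `From`/`Into` between same-lane-count vectors casts lane-wise:
        let f = f32x4::from(i);
        assert_eq!(f, f32x4::new(1., 2., 3., 4.));
    }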
+
\ No newline at end of file
diff --git a/src/packed_simd/api/hash.rs.html b/src/packed_simd/api/hash.rs.html
new file mode 100644
index 000000000..6036997f8
--- /dev/null
+++ b/src/packed_simd/api/hash.rs.html
@@ -0,0 +1,97 @@
+hash.rs.html -- source
+
+//! Implements `Hash` for vector types.
+
+macro_rules! impl_hash {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl crate::hash::Hash for $id {
+            #[inline]
+            fn hash<H: crate::hash::Hasher>(&self, state: &mut H) {
+                unsafe {
+                    union A {
+                        data: [$elem_ty; $id::lanes()],
+                        vec: $id,
+                    }
+                    A { vec: *self }.data.hash(state)
+                }
+            }
+        }
+
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _hash>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn hash() {
+                        use crate::hash::{Hash, Hasher};
+                        #[allow(deprecated)]
+                        use crate::hash::{SipHasher13};
+                        type A = [$elem_ty; $id::lanes()];
+                        let a: A = [42 as $elem_ty; $id::lanes()];
+                        assert_eq!(
+                            crate::mem::size_of::<A>(),
+                            crate::mem::size_of::<$id>()
+                        );
+                        #[allow(deprecated)]
+                        let mut a_hash = SipHasher13::new();
+                        let mut v_hash = a_hash.clone();
+                        a.hash(&mut a_hash);
+
+                        let v = $id::splat(42 as $elem_ty);
+                        v.hash(&mut v_hash);
+                        assert_eq!(a_hash.finish(), v_hash.finish());
+                    }
+                }
+            }
+        }
+    };
+}
+
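A sketch of the property the generated test asserts, assuming the portable
`u32x4` type and `std`'s `DefaultHasher`: hashing a vector agrees with
hashing an array holding the same lanes, because the union reinterprets the
vector as `[u32; 4]`:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};
    use packed_simd::u32x4;

    fn hash_of<T: Hash>(t: &T) -> u64 {
        let mut h = DefaultHasher::new();
        t.hash(&mut h);
        h.finish()
    }

    fn demo() {
        assert_eq!(hash_of(&u32x4::splat(42)), hash_of(&[42u32; 4]));
    }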
+
\ No newline at end of file
diff --git a/src/packed_simd/api/into_bits.rs.html b/src/packed_simd/api/into_bits.rs.html
new file mode 100644
index 000000000..d8ca0b302
--- /dev/null
+++ b/src/packed_simd/api/into_bits.rs.html
@@ -0,0 +1,121 @@
+into_bits.rs.html -- source
+
+//! Implementation of `FromBits` and `IntoBits`.
+
+/// Safe lossless bitwise conversion from `T` to `Self`.
+pub trait FromBits<T>: crate::marker::Sized {
+    /// Safe lossless bitwise transmute from `T` to `Self`.
+    fn from_bits(t: T) -> Self;
+}
+
+/// Safe lossless bitwise conversion from `Self` to `T`.
+pub trait IntoBits<T>: crate::marker::Sized {
+    /// Safe lossless bitwise transmute from `self` to `T`.
+    fn into_bits(self) -> T;
+}
+
+/// `FromBits` implies `IntoBits`.
+impl<T, U> IntoBits<U> for T
+where
+    U: FromBits<T>,
+{
+    #[inline]
+    fn into_bits(self) -> U {
+        debug_assert!(
+            crate::mem::size_of::<Self>() == crate::mem::size_of::<U>()
+        );
+        U::from_bits(self)
+    }
+}
+
+/// `FromBits` and `IntoBits` are reflexive
+impl<T> FromBits<T> for T {
+    #[inline]
+    fn from_bits(t: Self) -> Self {
+        t
+    }
+}
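
A usage sketch, assuming the portable `i8x16` and `u32x4` types: these traits
provide a safe transmute between vectors of the same bit width, and the
blanket impl above supplies the inverse direction:

    use packed_simd::{i8x16, u32x4, FromBits, IntoBits};

    fn demo() {
        let x = i8x16::splat(-1);        // all 128 bits set
        let y = u32x4::from_bits(x);     // same bits, different lane layout
        assert_eq!(y, u32x4::splat(u32::MAX));
        let z: i8x16 = y.into_bits();    // lossless round-trip
        assert_eq!(z, x);
    }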
+
+#[macro_use]
+mod macros;
+
+mod v16;
+pub use self::v16::*;
+
+mod v32;
+pub use self::v32::*;
+
+mod v64;
+pub use self::v64::*;
+
+mod v128;
+pub use self::v128::*;
+
+mod v256;
+pub use self::v256::*;
+
+mod v512;
+pub use self::v512::*;
+
+mod arch_specific;
+pub use self::arch_specific::*;
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/into_bits/arch_specific.rs.html b/src/packed_simd/api/into_bits/arch_specific.rs.html
new file mode 100644
index 000000000..3278f1e49
--- /dev/null
+++ b/src/packed_simd/api/into_bits/arch_specific.rs.html
@@ -0,0 +1,383 @@
+arch_specific.rs.html -- source
+
+//! `FromBits` and `IntoBits` between portable vector types and the
+//! architecture-specific vector types.
+#![rustfmt::skip]
+
+// FIXME: MIPS FromBits/IntoBits
+
+#[allow(unused)]
+use crate::*;
+
+/// This macro implements `FromBits` for the portable and the
+/// architecture-specific vector types.
+///
+/// The "leaf" case is at the bottom, and the most generic case is at the top.
+/// The generic case is split into smaller cases recursively.
+macro_rules! impl_arch {
+    ([$arch_head_i:ident[$arch_head_tt:tt]: $($arch_head_ty:ident),*],
+     $([$arch_tail_i:ident[$arch_tail_tt:tt]: $($arch_tail_ty:ident),*]),* |
+     from: $($from_ty:ident),* | into: $($into_ty:ident),* |
+     test: $test_tt:tt) => {
+        impl_arch!(
+            [$arch_head_i[$arch_head_tt]: $($arch_head_ty),*] |
+            from: $($from_ty),* |
+            into: $($into_ty),* |
+            test: $test_tt
+        );
+        impl_arch!(
+            $([$arch_tail_i[$arch_tail_tt]: $($arch_tail_ty),*]),* |
+            from: $($from_ty),* |
+            into: $($into_ty),* |
+            test: $test_tt
+        );
+    };
+    ([$arch:ident[$arch_tt:tt]: $($arch_ty:ident),*] |
+     from: $($from_ty:ident),* | into: $($into_ty:ident),* |
+     test: $test_tt:tt) => {
+        // note: if target is "arm", "+v7,+neon" must be enabled
+        // and the std library must be recompiled with them
+        #[cfg(any(
+            not(target_arch = "arm"),
+            all(target_feature = "v7", target_feature = "neon",
+                any(feature = "core_arch", libcore_neon)))
+        )]
+        // note: if target is "powerpc", "altivec" must be enabled
+        // and the std library must be recompiled with it
+        #[cfg(any(
+            not(target_arch = "powerpc"),
+            all(target_feature = "altivec", feature = "core_arch"),
+        ))]
+        #[cfg(target_arch = $arch_tt)]
+        use crate::arch::$arch::{
+            $($arch_ty),*
+        };
+
+        #[cfg(any(
+            not(target_arch = "arm"),
+            all(target_feature = "v7", target_feature = "neon",
+                any(feature = "core_arch", libcore_neon)))
+        )]
+        #[cfg(any(
+            not(target_arch = "powerpc"),
+            all(target_feature = "altivec", feature = "core_arch"),
+        ))]
+        #[cfg(target_arch = $arch_tt)]
+        impl_arch!($($arch_ty),* | $($from_ty),* | $($into_ty),* |
+                   test: $test_tt);
+    };
+    ($arch_head:ident, $($arch_tail:ident),* | $($from_ty:ident),*
+     | $($into_ty:ident),* | test: $test_tt:tt) => {
+        impl_arch!($arch_head | $($from_ty),* | $($into_ty),* |
+                   test: $test_tt);
+        impl_arch!($($arch_tail),* | $($from_ty),* | $($into_ty),* |
+                   test: $test_tt);
+    };
+    ($arch_head:ident | $($from_ty:ident),* | $($into_ty:ident),* |
+     test: $test_tt:tt) => {
+        impl_from_bits!($arch_head[$test_tt]: $($from_ty),*);
+        impl_into_bits!($arch_head[$test_tt]: $($into_ty),*);
+    };
+}
+
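A sketch of what these impls enable, assuming an x86_64 target: a portable
vector can be handed to `core::arch` intrinsics without an unsafe transmute:

    #[cfg(target_arch = "x86_64")]
    fn to_intrinsic(v: packed_simd::i32x4) -> core::arch::x86_64::__m128i {
        use packed_simd::IntoBits;
        // Same 128 bits, now usable with x86 intrinsics:
        v.into_bits()
    }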
+////////////////////////////////////////////////////////////////////////////////
+// Implementations for the 64-bit wide vector types:
+
+// FIXME: 64-bit single element types
+// FIXME: arm/aarch float16x4_t missing
+impl_arch!(
+    [x86["x86"]: __m64], [x86_64["x86_64"]: __m64],
+    [arm["arm"]: int8x8_t, uint8x8_t, poly8x8_t, int16x4_t, uint16x4_t,
+     poly16x4_t, int32x2_t, uint32x2_t, float32x2_t, int64x1_t,
+     uint64x1_t],
+    [aarch64["aarch64"]: int8x8_t, uint8x8_t, poly8x8_t, int16x4_t, uint16x4_t,
+     poly16x4_t, int32x2_t, uint32x2_t, float32x2_t, int64x1_t, uint64x1_t,
+     float64x1_t] |
+    from: i8x8, u8x8, m8x8, i16x4, u16x4, m16x4, i32x2, u32x2, f32x2, m32x2 |
+    into: i8x8, u8x8, i16x4, u16x4, i32x2, u32x2, f32x2 |
+    test: test_v64
+);
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementations for the 128-bit wide vector types:
+
+// FIXME: arm/aarch float16x8_t missing
+// FIXME: ppc vector_pixel missing
+// FIXME: ppc64 vector_Float16 missing
+// FIXME: ppc64 vector_signed_long_long missing
+// FIXME: ppc64 vector_unsigned_long_long missing
+// FIXME: ppc64 vector_bool_long_long missing
+// FIXME: ppc64 vector_signed___int128 missing
+// FIXME: ppc64 vector_unsigned___int128 missing
+impl_arch!(
+    [x86["x86"]: __m128, __m128i, __m128d],
+    [x86_64["x86_64"]:  __m128, __m128i, __m128d],
+    [arm["arm"]: int8x16_t, uint8x16_t, poly8x16_t, int16x8_t, uint16x8_t,
+     poly16x8_t, int32x4_t, uint32x4_t, float32x4_t, int64x2_t, uint64x2_t],
+    [aarch64["aarch64"]: int8x16_t, uint8x16_t, poly8x16_t, int16x8_t,
+     uint16x8_t, poly16x8_t, int32x4_t, uint32x4_t, float32x4_t, int64x2_t,
+     uint64x2_t, float64x2_t],
+    [powerpc["powerpc"]: vector_signed_char, vector_unsigned_char,
+     vector_signed_short, vector_unsigned_short, vector_signed_int,
+     vector_unsigned_int, vector_float],
+    [powerpc64["powerpc64"]: vector_signed_char, vector_unsigned_char,
+     vector_signed_short, vector_unsigned_short, vector_signed_int,
+     vector_unsigned_int,  vector_float, vector_signed_long,
+     vector_unsigned_long, vector_double] |
+    from: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4,
+    i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1 |
+    into: i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, f32x4, i64x2, u64x2, f64x2,
+    i128x1, u128x1 |
+    test: test_v128
+);
+
+impl_arch!(
+    [powerpc["powerpc"]: vector_bool_char],
+    [powerpc64["powerpc64"]: vector_bool_char] |
+    from: m8x16, m16x8, m32x4, m64x2, m128x1 |
+    into: i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, f32x4,
+    i64x2, u64x2, f64x2, i128x1, u128x1,
+    // Masks:
+    m8x16 |
+    test: test_v128
+);
+
+impl_arch!(
+    [powerpc["powerpc"]: vector_bool_short],
+    [powerpc64["powerpc64"]: vector_bool_short] |
+    from: m16x8, m32x4, m64x2, m128x1 |
+    into: i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, f32x4,
+    i64x2, u64x2, f64x2, i128x1, u128x1,
+    // Masks:
+    m8x16, m16x8 |
+    test: test_v128
+);
+
+impl_arch!(
+    [powerpc["powerpc"]: vector_bool_int],
+    [powerpc64["powerpc64"]: vector_bool_int] |
+    from: m32x4, m64x2, m128x1 |
+    into: i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, f32x4,
+    i64x2, u64x2, f64x2, i128x1, u128x1,
+    // Masks:
+    m8x16, m16x8, m32x4 |
+    test: test_v128
+);
+
+impl_arch!(
+    [powerpc64["powerpc64"]: vector_bool_long] |
+    from: m64x2, m128x1 |
+    into: i8x16, u8x16, i16x8, u16x8, i32x4, u32x4, f32x4,
+    i64x2, u64x2, f64x2, i128x1, u128x1,
+    // Masks:
+    m8x16, m16x8, m32x4, m64x2 |
+    test: test_v128
+);
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementations for the 256-bit wide vector types:
+
+impl_arch!(
+    [x86["x86"]: __m256, __m256i, __m256d],
+    [x86_64["x86_64"]:  __m256, __m256i, __m256d] |
+    from: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16,
+    i32x8, u32x8, f32x8, m32x8,
+    i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2 |
+    into: i8x32, u8x32, i16x16, u16x16, i32x8, u32x8, f32x8,
+    i64x4, u64x4, f64x4, i128x2, u128x2 |
+    test: test_v256
+);
+
+////////////////////////////////////////////////////////////////////////////////
+// FIXME: Implementations for the 512-bit wide vector types
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/into_bits/macros.rs.html b/src/packed_simd/api/into_bits/macros.rs.html
new file mode 100644
index 000000000..aa83c8121
--- /dev/null
+++ b/src/packed_simd/api/into_bits/macros.rs.html
@@ -0,0 +1,151 @@
+macros.rs.html -- source
+
+//! Macros implementing `FromBits`
+
+macro_rules! impl_from_bits_ {
+    ($id:ident[$test_tt:tt]: $from_ty:ident) => {
+        impl crate::api::into_bits::FromBits<$from_ty> for $id {
+            #[inline]
+            fn from_bits(x: $from_ty) -> Self {
+                unsafe { crate::mem::transmute(x) }
+            }
+        }
+
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _from_bits_ $from_ty>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn test() {
+                        use crate::{
+                            ptr::{read_unaligned},
+                            mem::{size_of, zeroed}
+                        };
+                        use crate::IntoBits;
+                        assert_eq!(size_of::<$id>(),
+                                   size_of::<$from_ty>());
+                        // This is safe because we never create a reference to
+                        // uninitialized memory:
+                        let a: $from_ty = unsafe { zeroed() };
+
+                        let b_0: $id = crate::FromBits::from_bits(a);
+                        let b_1: $id = a.into_bits();
+
+                        // Check that these are byte-wise equal, that is,
+                        // that the bit patterns are identical:
+                        for i in 0..size_of::<$id>() {
+                            // This is safe because we only read initialized
+                            // memory in bounds. Also, taking a reference to
+                            // `b_i` is ok because the fields are initialized.
+                            unsafe {
+                                let b_0_v: u8 = read_unaligned(
+                                    (&b_0 as *const $id as *const u8)
+                                        .wrapping_add(i)
+                                );
+                                let b_1_v: u8 = read_unaligned(
+                                    (&b_1 as *const $id as *const u8)
+                                        .wrapping_add(i)
+                                );
+                                assert_eq!(b_0_v, b_1_v);
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+macro_rules! impl_from_bits {
+    ($id:ident[$test_tt:tt]: $($from_ty:ident),*) => {
+        $(
+            impl_from_bits_!($id[$test_tt]: $from_ty);
+        )*
+    }
+}
+
+#[allow(unused)]
+macro_rules! impl_into_bits {
+    ($id:ident[$test_tt:tt]: $($from_ty:ident),*) => {
+        $(
+            impl_from_bits_!($from_ty[$test_tt]: $id);
+        )*
+    }
+}
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/into_bits/v128.rs.html b/src/packed_simd/api/into_bits/v128.rs.html
new file mode 100644
index 000000000..dd250b212
--- /dev/null
+++ b/src/packed_simd/api/into_bits/v128.rs.html
@@ -0,0 +1,59 @@
+v128.rs.html -- source
+
+//! `FromBits` and `IntoBits` implementations for portable 128-bit wide vectors
+#![rustfmt::skip]
+
+#[allow(unused)]  // wasm_bindgen_test
+use crate::*;
+
+impl_from_bits!(i8x16[test_v128]: u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(u8x16[test_v128]: i8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(m8x16[test_v128]: m16x8, m32x4, m64x2, m128x1);
+
+impl_from_bits!(i16x8[test_v128]: i8x16, u8x16, m8x16, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(u16x8[test_v128]: i8x16, u8x16, m8x16, i16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(m16x8[test_v128]: m32x4, m64x2, m128x1);
+
+impl_from_bits!(i32x4[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(u32x4[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(f32x4[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(m32x4[test_v128]: m64x2, m128x1);
+
+impl_from_bits!(i64x2[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, u64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(u64x2[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, f64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(f64x2[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, m64x2, i128x1, u128x1, m128x1);
+impl_from_bits!(m64x2[test_v128]: m128x1);
+
+impl_from_bits!(i128x1[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, u128x1, m128x1);
+impl_from_bits!(u128x1[test_v128]: i8x16, u8x16, m8x16, i16x8, u16x8, m16x8, i32x4, u32x4, f32x4, m32x4, i64x2, u64x2, f64x2, m64x2, i128x1, m128x1);
+// note: m128x1 cannot be constructed from all of the other masks' bit patterns here
+
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/into_bits/v16.rs.html b/src/packed_simd/api/into_bits/v16.rs.html
new file mode 100644
index 000000000..0a72e11df
--- /dev/null
+++ b/src/packed_simd/api/into_bits/v16.rs.html
@@ -0,0 +1,21 @@
+v16.rs.html -- source
+
+//! `FromBits` and `IntoBits` implementations for portable 16-bit wide vectors
+#![rustfmt::skip]
+
+#[allow(unused)]  // wasm_bindgen_test
+use crate::*;
+
+impl_from_bits!(i8x2[test_v16]: u8x2, m8x2);
+impl_from_bits!(u8x2[test_v16]: i8x2, m8x2);
+// note: m8x2 cannot be constructed from all i8x2 or u8x2 bit patterns,
+// since mask lanes must be either all zeros or all ones
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/into_bits/v256.rs.html b/src/packed_simd/api/into_bits/v256.rs.html
new file mode 100644
index 000000000..acbc4a207
--- /dev/null
+++ b/src/packed_simd/api/into_bits/v256.rs.html
@@ -0,0 +1,57 @@
+v256.rs.html -- source
+
+//! `FromBits` and `IntoBits` implementations for portable 256-bit wide vectors
+#![rustfmt::skip]
+
+#[allow(unused)]  // wasm_bindgen_test
+use crate::*;
+
+impl_from_bits!(i8x32[test_v256]: u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(u8x32[test_v256]: i8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(m8x32[test_v256]: m16x16, m32x8, m64x4, m128x2);
+
+impl_from_bits!(i16x16[test_v256]: i8x32, u8x32, m8x32, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(u16x16[test_v256]: i8x32, u8x32, m8x32, i16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(m16x16[test_v256]: m32x8, m64x4, m128x2);
+
+impl_from_bits!(i32x8[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(u32x8[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(f32x8[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(m32x8[test_v256]: m64x4, m128x2);
+
+impl_from_bits!(i64x4[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, u64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(u64x4[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, f64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(f64x4[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, m64x4, i128x2, u128x2, m128x2);
+impl_from_bits!(m64x4[test_v256]: m128x2);
+
+impl_from_bits!(i128x2[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, u128x2, m128x2);
+impl_from_bits!(u128x2[test_v256]: i8x32, u8x32, m8x32, i16x16, u16x16, m16x16, i32x8, u32x8, f32x8, m32x8, i64x4, u64x4, f64x4, m64x4, i128x2, m128x2);
+// note: m128x2 cannot be constructed from all of the other masks' bit patterns here
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/into_bits/v32.rs.html b/src/packed_simd/api/into_bits/v32.rs.html
new file mode 100644
index 000000000..6ba823846
--- /dev/null
+++ b/src/packed_simd/api/into_bits/v32.rs.html
@@ -0,0 +1,29 @@
+v32.rs.html -- source
+
+//! `FromBits` and `IntoBits` implementations for portable 32-bit wide vectors
+#![rustfmt::skip]
+
+#[allow(unused)]  // wasm_bindgen_test
+use crate::*;
+
+impl_from_bits!(i8x4[test_v32]: u8x4, m8x4, i16x2, u16x2, m16x2);
+impl_from_bits!(u8x4[test_v32]: i8x4, m8x4, i16x2, u16x2, m16x2);
+impl_from_bits!(m8x4[test_v32]: m16x2);
+
+impl_from_bits!(i16x2[test_v32]: i8x4, u8x4, m8x4, u16x2, m16x2);
+impl_from_bits!(u16x2[test_v32]: i8x4, u8x4, m8x4, i16x2, m16x2);
+// note: m16x2 cannot be constructed from all m8x4 bit patterns
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/into_bits/v512.rs.html b/src/packed_simd/api/into_bits/v512.rs.html
new file mode 100644
index 000000000..9bd9f9231
--- /dev/null
+++ b/src/packed_simd/api/into_bits/v512.rs.html
@@ -0,0 +1,57 @@
+v512.rs.html -- source
+
+//! `FromBits` and `IntoBits` implementations for portable 512-bit wide vectors
+#![rustfmt::skip]
+
+#[allow(unused)]  // wasm_bindgen_test
+use crate::*;
+
+impl_from_bits!(i8x64[test_v512]: u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(u8x64[test_v512]: i8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(m8x64[test_v512]: m16x32, m32x16, m64x8, m128x4);
+
+impl_from_bits!(i16x32[test_v512]: i8x64, u8x64, m8x64, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(u16x32[test_v512]: i8x64, u8x64, m8x64, i16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(m16x32[test_v512]: m32x16, m64x8, m128x4);
+
+impl_from_bits!(i32x16[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(u32x16[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(f32x16[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(m32x16[test_v512]: m64x8, m128x4);
+
+impl_from_bits!(i64x8[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, u64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(u64x8[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, f64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(f64x8[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, m64x8, i128x4, u128x4, m128x4);
+impl_from_bits!(m64x8[test_v512]: m128x4);
+
+impl_from_bits!(i128x4[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, u128x4, m128x4);
+impl_from_bits!(u128x4[test_v512]: i8x64, u8x64, m8x64, i16x32, u16x32, m16x32, i32x16, u32x16, f32x16, m32x16, i64x8, u64x8, f64x8, m64x8, i128x4, m128x4);
+// note: m128x4 cannot be constructed from all of the other masks' bit patterns here
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/into_bits/v64.rs.html b/src/packed_simd/api/into_bits/v64.rs.html
new file mode 100644
index 000000000..811679851
--- /dev/null
+++ b/src/packed_simd/api/into_bits/v64.rs.html
@@ -0,0 +1,39 @@
+v64.rs.html -- source
+
+//! `FromBits` and `IntoBits` implementations for portable 64-bit wide vectors
+#![rustfmt::skip]
+
+#[allow(unused)]  // wasm_bindgen_test
+use crate::*;
+
+impl_from_bits!(i8x8[test_v64]: u8x8, m8x8, i16x4, u16x4, m16x4, i32x2, u32x2, f32x2, m32x2);
+impl_from_bits!(u8x8[test_v64]: i8x8, m8x8, i16x4, u16x4, m16x4, i32x2, u32x2, f32x2, m32x2);
+impl_from_bits!(m8x8[test_v64]: m16x4, m32x2);
+
+impl_from_bits!(i16x4[test_v64]: i8x8, u8x8, m8x8, u16x4, m16x4, i32x2, u32x2, f32x2, m32x2);
+impl_from_bits!(u16x4[test_v64]: i8x8, u8x8, m8x8, i16x4, m16x4, i32x2, u32x2, f32x2, m32x2);
+impl_from_bits!(m16x4[test_v64]: m32x2);
+
+impl_from_bits!(i32x2[test_v64]: i8x8, u8x8, m8x8, i16x4, u16x4, m16x4, u32x2, f32x2, m32x2);
+impl_from_bits!(u32x2[test_v64]: i8x8, u8x8, m8x8, i16x4, u16x4, m16x4, i32x2, f32x2, m32x2);
+impl_from_bits!(f32x2[test_v64]: i8x8, u8x8, m8x8, i16x4, u16x4, m16x4, i32x2, u32x2, m32x2);
+// note: m32x2 cannot be constructed from all m16x4 or m8x8 bit patterns
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/math.rs.html b/src/packed_simd/api/math.rs.html
new file mode 100644
index 000000000..3efd96a3e
--- /dev/null
+++ b/src/packed_simd/api/math.rs.html
@@ -0,0 +1,11 @@
+math.rs.html -- source
+
+//! Implements vertical math operations
+
+#[macro_use]
+mod float;
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/math/float.rs.html b/src/packed_simd/api/math/float.rs.html
new file mode 100644
index 000000000..fb967d85c
--- /dev/null
+++ b/src/packed_simd/api/math/float.rs.html
@@ -0,0 +1,131 @@
+float.rs.html -- source
+
+//! Implements vertical floating-point math operations.
+
+#[macro_use]
+mod abs;
+
+#[macro_use]
+mod consts;
+
+#[macro_use]
+mod cos;
+
+#[macro_use]
+mod exp;
+
+#[macro_use]
+mod powf;
+
+#[macro_use]
+mod ln;
+
+#[macro_use]
+mod mul_add;
+
+#[macro_use]
+mod mul_adde;
+
+#[macro_use]
+mod recpre;
+
+#[macro_use]
+mod rsqrte;
+
+#[macro_use]
+mod sin;
+
+#[macro_use]
+mod sqrt;
+
+#[macro_use]
+mod sqrte;
+
+#[macro_use]
+mod tanh;
+
+macro_rules! impl_float_category {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident, $mask_ty:ident) => {
+        impl $id {
+            /// Lane-wise `NaN` test.
+            #[inline]
+            pub fn is_nan(self) -> $mask_ty {
+                self.ne(self)
+            }
+
+            /// Lane-wise infinity test.
+            #[inline]
+            pub fn is_infinite(self) -> $mask_ty {
+                self.eq(Self::INFINITY) | self.eq(Self::NEG_INFINITY)
+            }
+
+            /// Lane-wise finiteness test (neither `NaN` nor infinite).
+            #[inline]
+            pub fn is_finite(self) -> $mask_ty {
+                !(self.is_nan() | self.is_infinite())
+            }
+        }
+    };
+}
+
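An illustrative sketch of the lane-wise classification this macro generates,
assuming `f32x4` and its mask type `m32x4`:

    use packed_simd::{f32x4, m32x4};

    fn demo() {
        let x = f32x4::new(0.0, core::f32::NAN, core::f32::INFINITY, 1.0);
        assert_eq!(x.is_nan(), m32x4::new(false, true, false, false));
        assert_eq!(x.is_finite(), m32x4::new(true, false, false, true));
    }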
+
\ No newline at end of file
diff --git a/src/packed_simd/api/math/float/abs.rs.html b/src/packed_simd/api/math/float/abs.rs.html
new file mode 100644
index 000000000..ed1c2c6c1
--- /dev/null
+++ b/src/packed_simd/api/math/float/abs.rs.html
@@ -0,0 +1,65 @@
+abs.rs.html -- source
+
+//! Implements vertical (lane-wise) floating-point `abs`.
+
+macro_rules! impl_math_float_abs {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Absolute value.
+            #[inline]
+            pub fn abs(self) -> Self {
+                use crate::codegen::math::float::abs::Abs;
+                Abs::abs(self)
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _math_abs>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn abs() {
+                        let o = $id::splat(1 as $elem_ty);
+                        assert_eq!(o, o.abs());
+
+                        let mo = $id::splat(-1 as $elem_ty);
+                        assert_eq!(o, mo.abs());
+                    }
+                }
+            }
+        }
+    };
+}
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/math/float/consts.rs.html b/src/packed_simd/api/math/float/consts.rs.html
new file mode 100644
index 000000000..a56b88d2d
--- /dev/null
+++ b/src/packed_simd/api/math/float/consts.rs.html
@@ -0,0 +1,175 @@
+consts.rs.html -- source
+
+macro_rules! impl_float_consts {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident) => {
+        impl $id {
+            /// Machine epsilon value.
+            pub const EPSILON: $id = $id::splat(core::$elem_ty::EPSILON);
+
+            /// Smallest finite value.
+            pub const MIN: $id = $id::splat(core::$elem_ty::MIN);
+
+            /// Smallest positive normal value.
+            pub const MIN_POSITIVE: $id =
+                $id::splat(core::$elem_ty::MIN_POSITIVE);
+
+            /// Largest finite value.
+            pub const MAX: $id = $id::splat(core::$elem_ty::MAX);
+
+            /// Not a Number (NaN).
+            pub const NAN: $id = $id::splat(core::$elem_ty::NAN);
+
+            /// Infinity (∞).
+            pub const INFINITY: $id = $id::splat(core::$elem_ty::INFINITY);
+
+            /// Negative infinity (-∞).
+            pub const NEG_INFINITY: $id =
+                $id::splat(core::$elem_ty::NEG_INFINITY);
+
+            /// Archimedes' constant (π)
+            pub const PI: $id = $id::splat(core::$elem_ty::consts::PI);
+
+            /// π/2
+            pub const FRAC_PI_2: $id =
+                $id::splat(core::$elem_ty::consts::FRAC_PI_2);
+
+            /// π/3
+            pub const FRAC_PI_3: $id =
+                $id::splat(core::$elem_ty::consts::FRAC_PI_3);
+
+            /// π/4
+            pub const FRAC_PI_4: $id =
+                $id::splat(core::$elem_ty::consts::FRAC_PI_4);
+
+            /// π/6
+            pub const FRAC_PI_6: $id =
+                $id::splat(core::$elem_ty::consts::FRAC_PI_6);
+
+            /// π/8
+            pub const FRAC_PI_8: $id =
+                $id::splat(core::$elem_ty::consts::FRAC_PI_8);
+
+            /// 1/π
+            pub const FRAC_1_PI: $id =
+                $id::splat(core::$elem_ty::consts::FRAC_1_PI);
+
+            /// 2/π
+            pub const FRAC_2_PI: $id =
+                $id::splat(core::$elem_ty::consts::FRAC_2_PI);
+
+            /// 2/sqrt(π)
+            pub const FRAC_2_SQRT_PI: $id =
+                $id::splat(core::$elem_ty::consts::FRAC_2_SQRT_PI);
+
+            /// sqrt(2)
+            pub const SQRT_2: $id = $id::splat(core::$elem_ty::consts::SQRT_2);
+
+            /// 1/sqrt(2)
+            pub const FRAC_1_SQRT_2: $id =
+                $id::splat(core::$elem_ty::consts::FRAC_1_SQRT_2);
+
+            /// Euler's number (e)
+            pub const E: $id = $id::splat(core::$elem_ty::consts::E);
+
+            /// log<sub>2</sub>(e)
+            pub const LOG2_E: $id = $id::splat(core::$elem_ty::consts::LOG2_E);
+
+            /// log<sub>10</sub>(e)
+            pub const LOG10_E: $id =
+                $id::splat(core::$elem_ty::consts::LOG10_E);
+
+            /// ln(2)
+            pub const LN_2: $id = $id::splat(core::$elem_ty::consts::LN_2);
+
+            /// ln(10)
+            pub const LN_10: $id = $id::splat(core::$elem_ty::consts::LN_10);
+        }
+    };
+}
+
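What the generated constants look like in use, as a sketch assuming `f32x4`:
each one is the corresponding scalar constant splatted across every lane:

    use packed_simd::f32x4;

    fn demo() {
        assert_eq!(f32x4::PI, f32x4::splat(core::f32::consts::PI));
        assert_eq!(f32x4::LN_2.extract(3), core::f32::consts::LN_2);
    }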
+
\ No newline at end of file
diff --git a/src/packed_simd/api/math/float/cos.rs.html b/src/packed_simd/api/math/float/cos.rs.html
new file mode 100644
index 000000000..95373e1ec
--- /dev/null
+++ b/src/packed_simd/api/math/float/cos.rs.html
@@ -0,0 +1,91 @@
+cos.rs.html -- source
+
+//! Implements vertical (lane-wise) floating-point `cos`.
+
+macro_rules! impl_math_float_cos {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Cosine.
+            #[inline]
+            pub fn cos(self) -> Self {
+                use crate::codegen::math::float::cos::Cos;
+                Cos::cos(self)
+            }
+
+            /// Cosine of `self * PI`.
+            #[inline]
+            pub fn cos_pi(self) -> Self {
+                use crate::codegen::math::float::cos_pi::CosPi;
+                CosPi::cos_pi(self)
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _math_cos>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn cos() {
+                        use crate::$elem_ty::consts::PI;
+                        let z = $id::splat(0 as $elem_ty);
+                        let o = $id::splat(1 as $elem_ty);
+                        let p = $id::splat(PI as $elem_ty);
+                        let ph = $id::splat(PI as $elem_ty / 2.);
+                        let z_r = $id::splat((PI as $elem_ty / 2.).cos());
+                        let o_r = $id::splat((PI as $elem_ty).cos());
+
+                        assert_eq!(o, z.cos());
+                        assert_eq!(z_r, ph.cos());
+                        assert_eq!(o_r, p.cos());
+                    }
+                }
+            }
+        }
+    };
+}
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/math/float/exp.rs.html b/src/packed_simd/api/math/float/exp.rs.html
new file mode 100644
index 000000000..ec3d4743b
--- /dev/null
+++ b/src/packed_simd/api/math/float/exp.rs.html
@@ -0,0 +1,69 @@
+exp.rs.html -- source
+
+//! Implements vertical (lane-wise) floating-point `exp`.
+
+macro_rules! impl_math_float_exp {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Returns the exponential function of `self`: `e^(self)`.
+            #[inline]
+            pub fn exp(self) -> Self {
+                use crate::codegen::math::float::exp::Exp;
+                Exp::exp(self)
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _math_exp>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn exp() {
+                        let z = $id::splat(0 as $elem_ty);
+                        let o = $id::splat(1 as $elem_ty);
+                        assert_eq!(o, z.exp());
+
+                        let e = $id::splat(crate::f64::consts::E as $elem_ty);
+                        let tol = $id::splat(2.4e-4 as $elem_ty);
+                        assert!((e - o.exp()).abs().le(tol).all());
+                    }
+                }
+            }
+        }
+    };
+}
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/math/float/ln.rs.html b/src/packed_simd/api/math/float/ln.rs.html
new file mode 100644
index 000000000..4180f1d98
--- /dev/null
+++ b/src/packed_simd/api/math/float/ln.rs.html
@@ -0,0 +1,69 @@
+ln.rs.html -- source
+
+//! Implements vertical (lane-wise) floating-point `ln`.
+
+macro_rules! impl_math_float_ln {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Returns the natural logarithm of `self`.
+            #[inline]
+            pub fn ln(self) -> Self {
+                use crate::codegen::math::float::ln::Ln;
+                Ln::ln(self)
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _math_ln>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn ln() {
+                        let z = $id::splat(0 as $elem_ty);
+                        let o = $id::splat(1 as $elem_ty);
+                        assert_eq!(z, o.ln());
+
+                        let e = $id::splat(crate::f64::consts::E as $elem_ty);
+                        let tol = $id::splat(2.4e-4 as $elem_ty);
+                        assert!((o - e.ln()).abs().le(tol).all());
+                    }
+                }
+            }
+        }
+    };
+}
+
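Since `exp` and `ln` are both vertical, composing them gives a lane-wise
identity up to the implementations' error; a sketch assuming `f32x4`, with a
deliberately loose, illustrative tolerance:

    use packed_simd::f32x4;

    fn demo() {
        let x = f32x4::new(0.5, 1.0, 2.0, 4.0);
        let err = (x.ln().exp() - x).abs();
        assert!(err.le(f32x4::splat(1e-2)).all());
    }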
+
\ No newline at end of file
diff --git a/src/packed_simd/api/math/float/mul_add.rs.html b/src/packed_simd/api/math/float/mul_add.rs.html
new file mode 100644
index 000000000..e467c893e
--- /dev/null
+++ b/src/packed_simd/api/math/float/mul_add.rs.html
@@ -0,0 +1,91 @@
+mul_add.rs.html -- source
+
+//! Implements vertical (lane-wise) floating-point `mul_add`.
+
+macro_rules! impl_math_float_mul_add {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Fused multiply add: `self * y + z`
+            #[inline]
+            pub fn mul_add(self, y: Self, z: Self) -> Self {
+                use crate::codegen::math::float::mul_add::MulAdd;
+                MulAdd::mul_add(self, y, z)
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _math_mul_add>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn mul_add() {
+                        let z = $id::splat(0 as $elem_ty);
+                        let o = $id::splat(1 as $elem_ty);
+                        let t = $id::splat(2 as $elem_ty);
+                        let t3 = $id::splat(3 as $elem_ty);
+                        let f = $id::splat(4 as $elem_ty);
+
+                        assert_eq!(z, z.mul_add(z, z));
+                        assert_eq!(o, o.mul_add(o, z));
+                        assert_eq!(o, o.mul_add(z, o));
+                        assert_eq!(o, z.mul_add(o, o));
+
+                        assert_eq!(t, o.mul_add(o, o));
+                        assert_eq!(t, o.mul_add(t, z));
+                        assert_eq!(t, t.mul_add(o, z));
+
+                        assert_eq!(f, t.mul_add(t, z));
+                        assert_eq!(f, t.mul_add(o, t));
+                        assert_eq!(t3, t.mul_add(o, o));
+                    }
+                }
+            }
+        }
+    };
+}
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/math/float/mul_adde.rs.html b/src/packed_simd/api/math/float/mul_adde.rs.html
new file mode 100644
index 000000000..a92daf836
--- /dev/null
+++ b/src/packed_simd/api/math/float/mul_adde.rs.html
@@ -0,0 +1,99 @@
+mul_adde.rs.html -- source
+
+//! Implements vertical (lane-wise) floating-point `mul_adde`.
+
+macro_rules! impl_math_float_mul_adde {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Fused multiply add estimate: ~= `self * y + z`
+            ///
+            /// While fused multiply-add (`fma`) computes the intermediate
+            /// product with infinite precision and rounds only once,
+            /// `mul_adde` has _at worst_ the same precision as a multiply
+            /// followed by an add (two roundings). In exchange, it can be
+            /// more efficient on architectures that do not have an `fma`
+            /// instruction.
+            #[inline]
+            pub fn mul_adde(self, y: Self, z: Self) -> Self {
+                use crate::codegen::math::float::mul_adde::MulAddE;
+                MulAddE::mul_adde(self, y, z)
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _math_mul_adde>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn mul_adde() {
+                        let z = $id::splat(0 as $elem_ty);
+                        let o = $id::splat(1 as $elem_ty);
+                        let t = $id::splat(2 as $elem_ty);
+                        let t3 = $id::splat(3 as $elem_ty);
+                        let f = $id::splat(4 as $elem_ty);
+
+                        assert_eq!(z, z.mul_adde(z, z));
+                        assert_eq!(o, o.mul_adde(o, z));
+                        assert_eq!(o, o.mul_adde(z, o));
+                        assert_eq!(o, z.mul_adde(o, o));
+
+                        assert_eq!(t, o.mul_adde(o, o));
+                        assert_eq!(t, o.mul_adde(t, z));
+                        assert_eq!(t, t.mul_adde(o, z));
+
+                        assert_eq!(f, t.mul_adde(t, z));
+                        assert_eq!(f, t.mul_adde(o, t));
+                        assert_eq!(t3, t.mul_adde(o, o));
+                    }
+                }
+            }
+        }
+    };
+}
+
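The precision difference described above can be demonstrated with scalar
`f64::mul_add` from `std` (an aside; this is not crate code): the fused form
keeps low product bits that a separate multiply rounds away:

    fn demo() {
        let x = (1u64 << 27) as f64 + 1.0; // 2^27 + 1
        let z = -(x * x);                  // x*x needs 55 bits; it rounds
        assert_eq!(x * x + z, 0.0);        // two roundings lose the low bit
        assert_eq!(x.mul_add(x, z), 1.0);  // fma rounds once and keeps it
    }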
+
\ No newline at end of file
diff --git a/src/packed_simd/api/math/float/powf.rs.html b/src/packed_simd/api/math/float/powf.rs.html
new file mode 100644
index 000000000..cc26fe4c3
--- /dev/null
+++ b/src/packed_simd/api/math/float/powf.rs.html
@@ -0,0 +1,75 @@
+powf.rs.html -- source
+
+//! Implements vertical (lane-wise) floating-point `powf`.
+
+macro_rules! impl_math_float_powf {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Raises `self` to the floating-point power of `x`.
+            #[inline]
+            pub fn powf(self, x: Self) -> Self {
+                use crate::codegen::math::float::powf::Powf;
+                Powf::powf(self, x)
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _math_powf>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn powf() {
+                        let z = $id::splat(0 as $elem_ty);
+                        let o = $id::splat(1 as $elem_ty);
+                        let t = $id::splat(2 as $elem_ty);
+                        assert_eq!(o, o.powf(z));
+                        assert_eq!(o, t.powf(z));
+                        assert_eq!(o, o.powf(o));
+                        assert_eq!(t, t.powf(o));
+
+                        let f = $id::splat(4 as $elem_ty);
+                        assert_eq!(f, t.powf(t));
+                    }
+                }
+            }
+        }
+    };
+}
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/math/float/recpre.rs.html b/src/packed_simd/api/math/float/recpre.rs.html
new file mode 100644
index 000000000..696f4ed34
--- /dev/null
+++ b/src/packed_simd/api/math/float/recpre.rs.html
@@ -0,0 +1,75 @@
+recpre.rs.html -- source
+
+//! Implements vertical (lane-wise) floating-point `recpre`.
+
+macro_rules! impl_math_float_recpre {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Reciprocal estimate: `~= 1. / self`.
+            ///
+            /// FIXME: The precision of the estimate is currently unspecified.
+            #[inline]
+            pub fn recpre(self) -> Self {
+                $id::splat(1.) / self
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _math_recpre>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn recpre() {
+                        let tol = $id::splat(2.4e-4 as $elem_ty);
+                        let o = $id::splat(1 as $elem_ty);
+                        let error = (o - o.recpre()).abs();
+                        assert!(error.le(tol).all());
+
+                        let t = $id::splat(2 as $elem_ty);
+                        let e = 0.5;
+                        let error = (e - t.recpre()).abs();
+                        assert!(error.le(tol).all());
+                    }
+                }
+            }
+        }
+    };
+}
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/math/float/rsqrte.rs.html b/src/packed_simd/api/math/float/rsqrte.rs.html
new file mode 100644
index 000000000..876231c5b
--- /dev/null
+++ b/src/packed_simd/api/math/float/rsqrte.rs.html
@@ -0,0 +1,83 @@
+rsqrte.rs.html -- source
+
+//! Implements vertical (lane-wise) floating-point `rsqrte`.
+
+macro_rules! impl_math_float_rsqrte {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Reciprocal square-root estimate: `~= 1. / self.sqrt()`.
+            ///
+            /// FIXME: The precision of the estimate is currently unspecified.
+            #[inline]
+            pub fn rsqrte(self) -> Self {
+                unsafe {
+                    use crate::llvm::simd_fsqrt;
+                    $id::splat(1.) / Simd(simd_fsqrt(self.0))
+                }
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _math_rsqrte>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn rsqrte() {
+                        use crate::$elem_ty::consts::SQRT_2;
+                        let tol = $id::splat(2.4e-4 as $elem_ty);
+                        let o = $id::splat(1 as $elem_ty);
+                        let error = (o - o.rsqrte()).abs();
+                        assert!(error.le(tol).all());
+
+                        let t = $id::splat(2 as $elem_ty);
+                        let e = 1. / SQRT_2;
+                        let error = (e - t.rsqrte()).abs();
+                        assert!(error.le(tol).all());
+                    }
+                }
+            }
+        }
+    };
+}
+
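An illustrative use, assuming `f32x4` (the tolerance below is editorial,
since the estimate's precision is unspecified):

    use packed_simd::f32x4;

    fn demo() {
        let x = f32x4::splat(4.);
        let r = x.rsqrte(); // each lane ~= 1 / sqrt(4) = 0.5
        let err = (r - f32x4::splat(0.5)).abs();
        assert!(err.le(f32x4::splat(1e-3)).all());
    }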
+
\ No newline at end of file
diff --git a/src/packed_simd/api/math/float/sin.rs.html b/src/packed_simd/api/math/float/sin.rs.html
new file mode 100644
index 000000000..5a35c7c48
--- /dev/null
+++ b/src/packed_simd/api/math/float/sin.rs.html
@@ -0,0 +1,103 @@
+sin.rs.html -- source
+
+//! Implements vertical (lane-wise) floating-point `sin`.
+
+macro_rules! impl_math_float_sin {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Sine.
+            #[inline]
+            pub fn sin(self) -> Self {
+                use crate::codegen::math::float::sin::Sin;
+                Sin::sin(self)
+            }
+
+            /// Sine of `self * PI`.
+            #[inline]
+            pub fn sin_pi(self) -> Self {
+                use crate::codegen::math::float::sin_pi::SinPi;
+                SinPi::sin_pi(self)
+            }
+
+            /// Sine and cosine of `self * PI`.
+            #[inline]
+            pub fn sin_cos_pi(self) -> (Self, Self) {
+                use crate::codegen::math::float::sin_cos_pi::SinCosPi;
+                SinCosPi::sin_cos_pi(self)
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _math_sin>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn sin() {
+                        use crate::$elem_ty::consts::PI;
+                        let z = $id::splat(0 as $elem_ty);
+                        let p = $id::splat(PI as $elem_ty);
+                        let ph = $id::splat(PI as $elem_ty / 2.);
+                        let o_r = $id::splat((PI as $elem_ty / 2.).sin());
+                        let z_r = $id::splat((PI as $elem_ty).sin());
+
+                        assert_eq!(z, z.sin());
+                        assert_eq!(o_r, ph.sin());
+                        assert_eq!(z_r, p.sin());
+                    }
+                }
+            }
+        }
+    };
+}
+
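A sketch of a lane-wise identity these functions satisfy, assuming `f32x4`
(the tolerance is illustrative):

    use packed_simd::f32x4;

    fn demo() {
        let x = f32x4::new(0.0, 0.5, 1.0, 2.0);
        let (s, c) = (x.sin(), x.cos());
        let err = (s * s + c * c - f32x4::splat(1.)).abs();
        assert!(err.le(f32x4::splat(1e-4)).all());
    }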
+
\ No newline at end of file
diff --git a/src/packed_simd/api/math/float/sqrt.rs.html b/src/packed_simd/api/math/float/sqrt.rs.html
new file mode 100644
index 000000000..a73d0944a
--- /dev/null
+++ b/src/packed_simd/api/math/float/sqrt.rs.html
@@ -0,0 +1,73 @@
+sqrt.rs.html -- source
+
+//! Implements vertical (lane-wise) floating-point `sqrt`.
+
+macro_rules! impl_math_float_sqrt {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Square root.
+            #[inline]
+            pub fn sqrt(self) -> Self {
+                use crate::codegen::math::float::sqrt::Sqrt;
+                Sqrt::sqrt(self)
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _math_sqrt>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn sqrt() {
+                        use crate::$elem_ty::consts::SQRT_2;
+                        let z = $id::splat(0 as $elem_ty);
+                        let o = $id::splat(1 as $elem_ty);
+                        assert_eq!(z, z.sqrt());
+                        assert_eq!(o, o.sqrt());
+
+                        let t = $id::splat(2 as $elem_ty);
+                        let e = $id::splat(SQRT_2);
+                        assert_eq!(e, t.sqrt());
+
+                    }
+                }
+            }
+        }
+    };
+}
+
+
\ No newline at end of file
diff --git a/src/packed_simd/api/math/float/sqrte.rs.html b/src/packed_simd/api/math/float/sqrte.rs.html
new file mode 100644
index 000000000..3cb07eb89
--- /dev/null
+++ b/src/packed_simd/api/math/float/sqrte.rs.html
@@ -0,0 +1,91 @@
+sqrte.rs.html -- source
+
+//! Implements vertical (lane-wise) floating-point `sqrte`.
+
+macro_rules! impl_math_float_sqrte {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Square-root estimate.
+            ///
+            /// FIXME: The precision of the estimate is currently unspecified.
+            #[inline]
+            pub fn sqrte(self) -> Self {
+                use crate::codegen::math::float::sqrte::Sqrte;
+                Sqrte::sqrte(self)
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _math_sqrte>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn sqrte() {
+                        use crate::$elem_ty::consts::SQRT_2;
+                        let tol = $id::splat(2.4e-4 as $elem_ty);
+
+                        let z = $id::splat(0 as $elem_ty);
+                        let error = (z - z.sqrte()).abs();
+                        assert!(error.le(tol).all());
+
+                        let o = $id::splat(1 as $elem_ty);
+                        let error = (o - o.sqrte()).abs();
+                        assert!(error.le(tol).all());
+
+                        let t = $id::splat(2 as $elem_ty);
+                        let e = $id::splat(SQRT_2 as $elem_ty);
+                        let error = (e - t.sqrte()).abs();
+
+                        assert!(error.le(tol).all());
+                    }
+                }
+            }
+        }
+    };
+}
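+
+// A usage sketch (not part of the original source). Since `sqrte` is only an
+// estimate, results are compared against a tolerance rather than for exact
+// equality; the tolerance below mirrors the one used in the test above, and
+// `f32x4` is an assumed generated type:
+//
+// ```rust
+// use packed_simd::f32x4;
+//
+// let v = f32x4::splat(2.0);
+// let tol = f32x4::splat(2.4e-4);
+// // lane-wise |sqrte - sqrt| must stay within the tolerance on every lane:
+// assert!((v.sqrte() - v.sqrt()).abs().le(tol).all());
+// ```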
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/math/float/tanh.rs.html b/src/packed_simd/api/math/float/tanh.rs.html new file mode 100644 index 000000000..026e5e029 --- /dev/null +++ b/src/packed_simd/api/math/float/tanh.rs.html @@ -0,0 +1,61 @@ +tanh.rs.html -- source
+
+//! Implements vertical (lane-wise) floating-point `tanh`.
+
+macro_rules! impl_math_float_tanh {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Tanh.
+            #[inline]
+            pub fn tanh(self) -> Self {
+                use crate::codegen::math::float::tanh::Tanh;
+                Tanh::tanh(self)
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _math_tanh>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn tanh() {
+                        let z = $id::splat(0 as $elem_ty);
+
+                        assert_eq!(z, z.tanh());
+                    }
+                }
+            }
+        }
+    };
+}
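+
+// A usage sketch (not part of the original source), with `f32x4` assumed as a
+// representative generated type:
+//
+// ```rust
+// use packed_simd::f32x4;
+//
+// // tanh(0) == 0 on every lane:
+// let z = f32x4::splat(0.0);
+// assert_eq!(z.tanh(), z);
+// ```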
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/minimal.rs.html b/src/packed_simd/api/minimal.rs.html new file mode 100644 index 000000000..f7d2c4c94 --- /dev/null +++ b/src/packed_simd/api/minimal.rs.html @@ -0,0 +1,15 @@ +minimal.rs.html -- source
+
+#[macro_use]
+mod iuf;
+#[macro_use]
+mod mask;
+#[macro_use]
+mod ptr;
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/minimal/iuf.rs.html b/src/packed_simd/api/minimal/iuf.rs.html new file mode 100644 index 000000000..32a392a80 --- /dev/null +++ b/src/packed_simd/api/minimal/iuf.rs.html @@ -0,0 +1,337 @@ +iuf.rs.html -- source
+
+//! Minimal API of signed integer, unsigned integer, and floating-point
+//! vectors.
+
+macro_rules! impl_minimal_iuf {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $ielem_ty:ident |
+     $test_tt:tt | $($elem_name:ident),+ | $(#[$doc:meta])*) => {
+
+        $(#[$doc])*
+        pub type $id = Simd<[$elem_ty; $elem_count]>;
+
+        impl sealed::Simd for $id {
+            type Element = $elem_ty;
+            const LANES: usize = $elem_count;
+            type LanesType = [u32; $elem_count];
+        }
+
+        impl $id {
+            /// Creates a new instance with each vector element initialized
+            /// with the corresponding provided value.
+            #[inline]
+            #[allow(clippy::too_many_arguments)]
+            pub const fn new($($elem_name: $elem_ty),*) -> Self {
+                Simd(codegen::$id($($elem_name as $ielem_ty),*))
+            }
+
+            /// Returns the number of vector lanes.
+            #[inline]
+            pub const fn lanes() -> usize {
+                $elem_count
+            }
+
+            /// Constructs a new instance with each element initialized to
+            /// `value`.
+            #[inline]
+            pub const fn splat(value: $elem_ty) -> Self {
+                Simd(codegen::$id($({
+                    #[allow(non_camel_case_types, dead_code)]
+                    struct $elem_name;
+                    value as $ielem_ty
+                }),*))
+            }
+
+            /// Extracts the value at `index`.
+            ///
+            /// # Panics
+            ///
+            /// If `index >= Self::lanes()`.
+            #[inline]
+            pub fn extract(self, index: usize) -> $elem_ty {
+                assert!(index < $elem_count);
+                unsafe { self.extract_unchecked(index) }
+            }
+
+            /// Extracts the value at `index`.
+            ///
+            /// # Precondition
+            ///
+            /// If `index >= Self::lanes()` the behavior is undefined.
+            #[inline]
+            pub unsafe fn extract_unchecked(self, index: usize) -> $elem_ty {
+                use crate::llvm::simd_extract;
+                let e: $ielem_ty = simd_extract(self.0, index as u32);
+                e as $elem_ty
+            }
+
+            /// Returns a new vector where the value at `index` is replaced by `new_value`.
+            ///
+            /// # Panics
+            ///
+            /// If `index >= Self::lanes()`.
+            #[inline]
+            #[must_use = "replace does not modify the original value - \
+                          it returns a new vector with the value at `index` \
+                          replaced by `new_value`"
+            ]
+            pub fn replace(self, index: usize, new_value: $elem_ty) -> Self {
+                assert!(index < $elem_count);
+                unsafe { self.replace_unchecked(index, new_value) }
+            }
+
+            /// Returns a new vector where the value at `index` is replaced by `new_value`.
+            ///
+            /// # Precondition
+            ///
+            /// If `index >= Self::lanes()` the behavior is undefined.
+            #[inline]
+            #[must_use = "replace_unchecked does not modify the original value - \
+                          it returns a new vector with the value at `index` \
+                          replaced by `new_value`"
+            ]
+            pub unsafe fn replace_unchecked(
+                self,
+                index: usize,
+                new_value: $elem_ty,
+            ) -> Self {
+                use crate::llvm::simd_insert;
+                Simd(simd_insert(self.0, index as u32, new_value as $ielem_ty))
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _minimal>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn minimal() {
+                        // lanes:
+                        assert_eq!($elem_count, $id::lanes());
+
+                        // splat and extract / extract_unchecked:
+                        const VAL: $elem_ty = 7 as $elem_ty;
+                        const VEC: $id = $id::splat(VAL);
+                        for i in 0..$id::lanes() {
+                            assert_eq!(VAL, VEC.extract(i));
+                            assert_eq!(
+                                VAL, unsafe { VEC.extract_unchecked(i) }
+                            );
+                        }
+
+                        // replace / replace_unchecked
+                        let new_vec = VEC.replace(0, 42 as $elem_ty);
+                        for i in 0..$id::lanes() {
+                            if i == 0 {
+                                assert_eq!(42 as $elem_ty, new_vec.extract(i));
+                            } else {
+                                assert_eq!(VAL, new_vec.extract(i));
+                            }
+                        }
+                        let new_vec = unsafe {
+                            VEC.replace_unchecked(0, 42 as $elem_ty)
+                        };
+                        for i in 0..$id::lanes() {
+                            if i == 0 {
+                                assert_eq!(42 as $elem_ty, new_vec.extract(i));
+                            } else {
+                                assert_eq!(VAL, new_vec.extract(i));
+                            }
+                        }
+                    }
+
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn extract_panic_oob() {
+                        const VAL: $elem_ty = 7 as $elem_ty;
+                        const VEC: $id = $id::splat(VAL);
+                        let _ = VEC.extract($id::lanes());
+                    }
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn replace_panic_oob() {
+                        const VAL: $elem_ty = 7 as $elem_ty;
+                        const VEC: $id = $id::splat(VAL);
+                        let _ = VEC.replace($id::lanes(), 42 as $elem_ty);
+                    }
+                }
+            }
+        }
+    }
+}
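+
+// A usage sketch (not part of the original source) of the minimal API this
+// macro generates, with `i32x4` assumed as a representative type:
+//
+// ```rust
+// use packed_simd::i32x4;
+//
+// assert_eq!(i32x4::lanes(), 4);
+// let v = i32x4::new(0, 1, 2, 3);
+// assert_eq!(v.extract(2), 2);
+// // `replace` returns a new vector; the original is unchanged.
+// let w = v.replace(0, 42);
+// assert_eq!(w.extract(0), 42);
+// assert_eq!(v.extract(0), 0);
+// ```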
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/minimal/mask.rs.html b/src/packed_simd/api/minimal/mask.rs.html new file mode 100644 index 000000000..4f14c492f --- /dev/null +++ b/src/packed_simd/api/minimal/mask.rs.html @@ -0,0 +1,351 @@ +mask.rs.html -- source
+
+//! Minimal API of mask vectors.
+
+macro_rules! impl_minimal_mask {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $ielem_ty:ident
+    | $test_tt:tt | $($elem_name:ident),+ | $(#[$doc:meta])*) => {
+        $(#[$doc])*
+        pub type $id = Simd<[$elem_ty; $elem_count]>;
+
+        impl sealed::Simd for $id {
+            type Element = $elem_ty;
+            const LANES: usize = $elem_count;
+            type LanesType = [u32; $elem_count];
+        }
+
+        impl $id {
+            /// Creates a new instance with each vector element initialized
+            /// with the corresponding provided value.
+            #[inline]
+            #[allow(clippy::too_many_arguments)]
+            pub const fn new($($elem_name: bool),*) -> Self {
+                Simd(codegen::$id($(Self::bool_to_internal($elem_name)),*))
+            }
+
+            /// Converts a boolean type into the type of the vector lanes.
+            #[inline]
+            #[allow(clippy::indexing_slicing)]
+            const fn bool_to_internal(x: bool) -> $ielem_ty {
+                [0 as $ielem_ty, !(0 as $ielem_ty)][x as usize]
+            }
+
+            /// Returns the number of vector lanes.
+            #[inline]
+            pub const fn lanes() -> usize {
+                $elem_count
+            }
+
+            /// Constructs a new instance with each element initialized to
+            /// `value`.
+            #[inline]
+            pub const fn splat(value: bool) -> Self {
+                Simd(codegen::$id($({
+                    #[allow(non_camel_case_types, dead_code)]
+                    struct $elem_name;
+                    Self::bool_to_internal(value)
+                }),*))
+            }
+
+            /// Extracts the value at `index`.
+            ///
+            /// # Panics
+            ///
+            /// If `index >= Self::lanes()`.
+            #[inline]
+            pub fn extract(self, index: usize) -> bool {
+                assert!(index < $elem_count);
+                unsafe { self.extract_unchecked(index) }
+            }
+
+            /// Extracts the value at `index`.
+            ///
+            /// # Precondition
+            ///
+            /// If `index >= Self::lanes()` the behavior is undefined.
+            #[inline]
+            pub unsafe fn extract_unchecked(self, index: usize) -> bool {
+                use crate::llvm::simd_extract;
+                let x: $ielem_ty = simd_extract(self.0, index as u32);
+                x != 0
+            }
+
+            /// Returns a new vector where the value at `index` is replaced by
+            /// `new_value`.
+            ///
+            /// # Panics
+            ///
+            /// If `index >= Self::lanes()`.
+            #[inline]
+            #[must_use = "replace does not modify the original value - \
+                          it returns a new vector with the value at `index` \
+                          replaced by `new_value`"
+            ]
+            pub fn replace(self, index: usize, new_value: bool) -> Self {
+                assert!(index < $elem_count);
+                unsafe { self.replace_unchecked(index, new_value) }
+            }
+
+            /// Returns a new vector where the value at `index` is replaced by
+            /// `new_value`.
+            ///
+            /// # Precondition
+            ///
+            /// If `index >= Self::lanes()` the behavior is undefined.
+            #[inline]
+            #[must_use = "replace_unchecked does not modify the original value - \
+                          it returns a new vector with the value at `index` \
+                          replaced by `new_value`"
+            ]
+            pub unsafe fn replace_unchecked(
+                self,
+                index: usize,
+                new_value: bool,
+            ) -> Self {
+                use crate::llvm::simd_insert;
+                Simd(simd_insert(self.0, index as u32,
+                                 Self::bool_to_internal(new_value)))
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _minimal>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn minimal() {
+                        // TODO: test new
+
+                        // lanes:
+                        assert_eq!($elem_count, $id::lanes());
+
+                        // splat and extract / extract_unchecked:
+                        let vec = $id::splat(true);
+                        for i in 0..$id::lanes() {
+                            assert_eq!(true, vec.extract(i));
+                            assert_eq!(true,
+                                       unsafe { vec.extract_unchecked(i) }
+                            );
+                        }
+
+                        // replace / replace_unchecked
+                        let new_vec = vec.replace(0, false);
+                        for i in 0..$id::lanes() {
+                            if i == 0 {
+                                assert_eq!(false, new_vec.extract(i));
+                            } else {
+                                assert_eq!(true, new_vec.extract(i));
+                            }
+                        }
+                        let new_vec = unsafe {
+                            vec.replace_unchecked(0, false)
+                        };
+                        for i in 0..$id::lanes() {
+                            if i == 0 {
+                                assert_eq!(false, new_vec.extract(i));
+                            } else {
+                                assert_eq!(true, new_vec.extract(i));
+                            }
+                        }
+                    }
+
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn extract_panic_oob() {
+                        let vec = $id::splat(false);
+                        let _ = vec.extract($id::lanes());
+                    }
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn replace_panic_oob() {
+                        let vec = $id::splat(false);
+                        let _ = vec.replace($id::lanes(), true);
+                    }
+                }
+            }
+        }
+    }
+}
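+
+// A usage sketch (not part of the original source), with `m32x4` assumed as a
+// representative generated mask type:
+//
+// ```rust
+// use packed_simd::m32x4;
+//
+// // each lane holds a boolean, stored internally as all-zeros or all-ones:
+// let m = m32x4::splat(false).replace(0, true);
+// assert!(m.extract(0));
+// assert!(!m.extract(1));
+// ```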
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/minimal/ptr.rs.html b/src/packed_simd/api/minimal/ptr.rs.html new file mode 100644 index 000000000..f22744b86 --- /dev/null +++ b/src/packed_simd/api/minimal/ptr.rs.html @@ -0,0 +1,2773 @@ +ptr.rs.html -- source
+
+//! Minimal API of pointer vectors.
+
+macro_rules! impl_minimal_p {
+    ([$elem_ty:ty; $elem_count:expr]: $id:ident, $mask_ty:ident,
+     $usize_ty:ident, $isize_ty:ident | $ref:ident | $test_tt:tt
+     | $($elem_name:ident),+ | ($true:expr, $false:expr) |
+     $(#[$doc:meta])*) => {
+
+        $(#[$doc])*
+        pub type $id<T> = Simd<[$elem_ty; $elem_count]>;
+
+        impl<T> sealed::Simd for $id<T> {
+            type Element = $elem_ty;
+            const LANES: usize = $elem_count;
+            type LanesType = [u32; $elem_count];
+        }
+
+        impl<T> $id<T> {
+            /// Creates a new instance with each vector element initialized
+            /// with the corresponding provided value.
+            #[inline]
+            #[allow(clippy::too_many_arguments)]
+            pub const fn new($($elem_name: $elem_ty),*) -> Self {
+                Simd(codegen::$id($($elem_name),*))
+            }
+
+            /// Returns the number of vector lanes.
+            #[inline]
+            pub const fn lanes() -> usize {
+                $elem_count
+            }
+
+            /// Constructs a new instance with each element initialized to
+            /// `value`.
+            #[inline]
+            pub const fn splat(value: $elem_ty) -> Self {
+                Simd(codegen::$id($({
+                    #[allow(non_camel_case_types, dead_code)]
+                    struct $elem_name;
+                    value
+                }),*))
+            }
+
+            /// Constructs a new instance with each element initialized to
+            /// `null`.
+            #[inline]
+            pub const fn null() -> Self {
+                Self::splat(crate::ptr::null_mut() as $elem_ty)
+            }
+
+            /// Returns a mask that selects those lanes that contain `null`
+            /// pointers.
+            #[inline]
+            pub fn is_null(self) -> $mask_ty {
+                self.eq(Self::null())
+            }
+
+            /// Extracts the value at `index`.
+            ///
+            /// # Panics
+            ///
+            /// If `index >= Self::lanes()`.
+            #[inline]
+            pub fn extract(self, index: usize) -> $elem_ty {
+                assert!(index < $elem_count);
+                unsafe { self.extract_unchecked(index) }
+            }
+
+            /// Extracts the value at `index`.
+            ///
+            /// # Precondition
+            ///
+            /// If `index >= Self::lanes()` the behavior is undefined.
+            #[inline]
+            pub unsafe fn extract_unchecked(self, index: usize) -> $elem_ty {
+                use crate::llvm::simd_extract;
+                simd_extract(self.0, index as u32)
+            }
+
+            /// Returns a new vector where the value at `index` is replaced by
+            /// `new_value`.
+            ///
+            /// # Panics
+            ///
+            /// If `index >= Self::lanes()`.
+            #[inline]
+            #[must_use = "replace does not modify the original value - \
+                          it returns a new vector with the value at `index` \
+                          replaced by `new_value`"
+            ]
+            #[allow(clippy::not_unsafe_ptr_arg_deref)]
+            pub fn replace(self, index: usize, new_value: $elem_ty) -> Self {
+                assert!(index < $elem_count);
+                unsafe { self.replace_unchecked(index, new_value) }
+            }
+
+            /// Returns a new vector where the value at `index` is replaced by `new_value`.
+            ///
+            /// # Precondition
+            ///
+            /// If `index >= Self::lanes()` the behavior is undefined.
+            #[inline]
+            #[must_use = "replace_unchecked does not modify the original value - \
+                          it returns a new vector with the value at `index` \
+                          replaced by `new_value`"
+            ]
+            pub unsafe fn replace_unchecked(
+                self,
+                index: usize,
+                new_value: $elem_ty,
+            ) -> Self {
+                use crate::llvm::simd_insert;
+                Simd(simd_insert(self.0, index as u32, new_value))
+            }
+        }
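+
+        // A usage sketch (not part of the original source; kept as a comment
+        // because real code cannot live inside this macro body). `cptrx4<T>`
+        // is assumed as a representative generated pointer-vector type:
+        //
+        // ```rust
+        // use packed_simd::cptrx4;
+        //
+        // let v = cptrx4::<i32>::null();
+        // assert!(v.is_null().all());
+        // let x = 7_i32;
+        // // replace one lane with a non-null pointer:
+        // let v = v.replace(0, &x as *const i32);
+        // assert!(!v.is_null().all());
+        // ```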
+
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _minimal>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn minimal() {
+                        // lanes:
+                        assert_eq!($elem_count, $id::<i32>::lanes());
+
+                        // splat and extract / extract_unchecked:
+                        let VAL7: <$id<i32> as sealed::Simd>::Element
+                            = $ref!(7);
+                        let VAL42: <$id<i32> as sealed::Simd>::Element
+                            = $ref!(42);
+                        let VEC: $id<i32> = $id::splat(VAL7);
+                        for i in 0..$id::<i32>::lanes() {
+                            assert_eq!(VAL7, VEC.extract(i));
+                            assert_eq!(
+                                VAL7, unsafe { VEC.extract_unchecked(i) }
+                            );
+                        }
+
+                        // replace / replace_unchecked
+                        let new_vec = VEC.replace(0, VAL42);
+                        for i in 0..$id::<i32>::lanes() {
+                            if i == 0 {
+                                assert_eq!(VAL42, new_vec.extract(i));
+                            } else {
+                                assert_eq!(VAL7, new_vec.extract(i));
+                            }
+                        }
+                        let new_vec = unsafe {
+                            VEC.replace_unchecked(0, VAL42)
+                        };
+                        for i in 0..$id::<i32>::lanes() {
+                            if i == 0 {
+                                assert_eq!(VAL42, new_vec.extract(i));
+                            } else {
+                                assert_eq!(VAL7, new_vec.extract(i));
+                            }
+                        }
+
+                        let mut n = $id::<i32>::null();
+                        assert_eq!(
+                            n,
+                            $id::<i32>::splat(unsafe { crate::mem::zeroed() })
+                        );
+                        assert!(n.is_null().all());
+                        n = n.replace(
+                            0, unsafe { crate::mem::transmute(1_isize) }
+                        );
+                        assert!(!n.is_null().all());
+                        if $id::<i32>::lanes() > 1 {
+                            assert!(n.is_null().any());
+                        } else {
+                            assert!(!n.is_null().any());
+                        }
+                    }
+
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn extract_panic_oob() {
+                        let VAL: <$id<i32> as sealed::Simd>::Element
+                            = $ref!(7);
+                        let VEC: $id<i32> = $id::splat(VAL);
+                        let _ = VEC.extract($id::<i32>::lanes());
+                    }
+
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn replace_panic_oob() {
+                        let VAL: <$id<i32> as sealed::Simd>::Element
+                            = $ref!(7);
+                        let VAL42: <$id<i32> as sealed::Simd>::Element
+                            = $ref!(42);
+                        let VEC: $id<i32> = $id::splat(VAL);
+                        let _ = VEC.replace($id::<i32>::lanes(), VAL42);
+                    }
+                }
+            }
+        }
+
+        impl<T> crate::fmt::Debug for $id<T> {
+            #[allow(clippy::missing_inline_in_public_items)]
+            fn fmt(&self, f: &mut crate::fmt::Formatter<'_>)
+                   -> crate::fmt::Result {
+                write!(
+                    f,
+                    "{}<{}>(",
+                    stringify!($id),
+                    unsafe { crate::intrinsics::type_name::<T>() }
+                )?;
+                for i in 0..$elem_count {
+                    if i > 0 {
+                        write!(f, ", ")?;
+                    }
+                    self.extract(i).fmt(f)?;
+                }
+                write!(f, ")")
+            }
+        }
+
+         test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _fmt_debug>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn debug() {
+                        use arrayvec::{ArrayString,ArrayVec};
+                        type TinyString = ArrayString<[u8; 512]>;
+
+                        use crate::fmt::Write;
+                        let v = $id::<i32>::default();
+                        let mut s = TinyString::new();
+                        write!(&mut s, "{:?}", v).unwrap();
+
+                        let mut beg = TinyString::new();
+                        write!(&mut beg, "{}<i32>(", stringify!($id)).unwrap();
+                        assert!(
+                            s.starts_with(beg.as_str()),
+                            "s = {} (should start with = {})", s, beg
+                        );
+                        assert!(s.ends_with(")"));
+                        let s: ArrayVec<[TinyString; 64]>
+                            = s.replace(beg.as_str(), "")
+                            .replace(")", "").split(",")
+                            .map(|v| TinyString::from(v.trim()).unwrap())
+                            .collect();
+                        assert_eq!(s.len(), $id::<i32>::lanes());
+                        for (index, ss) in s.into_iter().enumerate() {
+                            let mut e = TinyString::new();
+                            write!(&mut e, "{:?}", v.extract(index)).unwrap();
+                            assert_eq!(ss, e);
+                        }
+                    }
+                }
+            }
+         }
+
+        impl<T> Default for $id<T> {
+            #[inline]
+            fn default() -> Self {
+                // FIXME: ptrs do not implement default
+                Self::null()
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _default>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn default() {
+                        let a = $id::<i32>::default();
+                        for i in 0..$id::<i32>::lanes() {
+                            assert_eq!(
+                                a.extract(i), unsafe { crate::mem::zeroed() }
+                            );
+                        }
+                    }
+                }
+            }
+        }
+
+        impl<T> $id<T> {
+            /// Lane-wise equality comparison.
+            #[inline]
+            pub fn eq(self, other: Self) -> $mask_ty {
+                unsafe {
+                    use crate::llvm::simd_eq;
+                    let a: $usize_ty = crate::mem::transmute(self);
+                    let b: $usize_ty = crate::mem::transmute(other);
+                    Simd(simd_eq(a.0, b.0))
+                }
+            }
+
+            /// Lane-wise inequality comparison.
+            #[inline]
+            pub fn ne(self, other: Self) -> $mask_ty {
+                unsafe {
+                    use crate::llvm::simd_ne;
+                    let a: $usize_ty = crate::mem::transmute(self);
+                    let b: $usize_ty = crate::mem::transmute(other);
+                    Simd(simd_ne(a.0, b.0))
+                }
+            }
+
+            /// Lane-wise less-than comparison.
+            #[inline]
+            pub fn lt(self, other: Self) -> $mask_ty {
+                unsafe {
+                    use crate::llvm::simd_lt;
+                    let a: $usize_ty = crate::mem::transmute(self);
+                    let b: $usize_ty = crate::mem::transmute(other);
+                    Simd(simd_lt(a.0, b.0))
+                }
+            }
+
+            /// Lane-wise less-than-or-equals comparison.
+            #[inline]
+            pub fn le(self, other: Self) -> $mask_ty {
+                unsafe {
+                    use crate::llvm::simd_le;
+                    let a: $usize_ty = crate::mem::transmute(self);
+                    let b: $usize_ty = crate::mem::transmute(other);
+                    Simd(simd_le(a.0, b.0))
+                }
+            }
+
+            /// Lane-wise greater-than comparison.
+            #[inline]
+            pub fn gt(self, other: Self) -> $mask_ty {
+                unsafe {
+                    use crate::llvm::simd_gt;
+                    let a: $usize_ty = crate::mem::transmute(self);
+                    let b: $usize_ty = crate::mem::transmute(other);
+                    Simd(simd_gt(a.0, b.0))
+                }
+            }
+
+            /// Lane-wise greater-than-or-equals comparison.
+            #[inline]
+            pub fn ge(self, other: Self) -> $mask_ty {
+                unsafe {
+                    use crate::llvm::simd_ge;
+                    let a: $usize_ty = crate::mem::transmute(self);
+                    let b: $usize_ty = crate::mem::transmute(other);
+                    Simd(simd_ge(a.0, b.0))
+                }
+            }
+        }
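+
+        // Sketch (not part of the original source): each comparison returns a
+        // mask vector with one boolean lane per pointer lane. With the
+        // assumed `cptrx4<T>` type:
+        //
+        // ```rust
+        // use packed_simd::cptrx4;
+        //
+        // let a = cptrx4::<i32>::null();
+        // let x = 3_i32;
+        // let b = cptrx4::<i32>::splat(&x as *const i32);
+        // // null != non-null on every lane:
+        // assert!(a.ne(b).all());
+        // ```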
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _cmp_vertical>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn cmp() {
+                        let a = $id::<i32>::null();
+                        let b = $id::<i32>::splat(unsafe {
+                            crate::mem::transmute(1_isize)
+                        });
+
+                        let r = a.lt(b);
+                        let e = $mask_ty::splat(true);
+                        assert!(r == e);
+                        let r = a.le(b);
+                        assert!(r == e);
+
+                        let e = $mask_ty::splat(false);
+                        let r = a.gt(b);
+                        assert!(r == e);
+                        let r = a.ge(b);
+                        assert!(r == e);
+                        let r = a.eq(b);
+                        assert!(r == e);
+
+                        let mut a = a;
+                        let mut b = b;
+                        let mut e = e;
+                        for i in 0..$id::<i32>::lanes() {
+                            if i % 2 == 0 {
+                                a = a.replace(
+                                    i,
+                                    unsafe { crate::mem::transmute(0_isize) }
+                                );
+                                b = b.replace(
+                                    i,
+                                    unsafe { crate::mem::transmute(1_isize) }
+                                );
+                                e = e.replace(i, true);
+                            } else {
+                                a = a.replace(
+                                    i,
+                                    unsafe { crate::mem::transmute(1_isize) }
+                                );
+                                b = b.replace(
+                                    i,
+                                    unsafe { crate::mem::transmute(0_isize) }
+                                );
+                                e = e.replace(i, false);
+                            }
+                        }
+                        let r = a.lt(b);
+                        assert!(r == e);
+                    }
+                }
+            }
+        }
+
+        #[allow(clippy::partialeq_ne_impl)]
+        impl<T> crate::cmp::PartialEq<$id<T>> for $id<T> {
+            #[inline]
+            fn eq(&self, other: &Self) -> bool {
+                $id::<T>::eq(*self, *other).all()
+            }
+            #[inline]
+            fn ne(&self, other: &Self) -> bool {
+                $id::<T>::ne(*self, *other).any()
+            }
+        }
+
+        // FIXME: https://github.com/rust-lang-nursery/rust-clippy/issues/2892
+        #[allow(clippy::partialeq_ne_impl)]
+        impl<T> crate::cmp::PartialEq<LexicographicallyOrdered<$id<T>>>
+            for LexicographicallyOrdered<$id<T>>
+        {
+            #[inline]
+            fn eq(&self, other: &Self) -> bool {
+                self.0 == other.0
+            }
+            #[inline]
+            fn ne(&self, other: &Self) -> bool {
+                self.0 != other.0
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _cmp_PartialEq>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn partial_eq() {
+                        let a = $id::<i32>::null();
+                        let b = $id::<i32>::splat(unsafe {
+                            crate::mem::transmute(1_isize)
+                        });
+
+                        assert!(a != b);
+                        assert!(!(a == b));
+                        assert!(a == a);
+                        assert!(!(a != a));
+
+                        if $id::<i32>::lanes() > 1 {
+                            let a = $id::<i32>::null().replace(0, unsafe {
+                                crate::mem::transmute(1_isize)
+                            });
+                            let b = $id::<i32>::splat(unsafe {
+                                crate::mem::transmute(1_isize)
+                            });
+
+                            assert!(a != b);
+                            assert!(!(a == b));
+                            assert!(a == a);
+                            assert!(!(a != a));
+                        }
+                    }
+                }
+            }
+        }
+
+        impl<T> crate::cmp::Eq for $id<T> {}
+        impl<T> crate::cmp::Eq for LexicographicallyOrdered<$id<T>> {}
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _cmp_eq>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn eq() {
+                        fn foo<E: crate::cmp::Eq>(_: E) {}
+                        let a = $id::<i32>::null();
+                        foo(a);
+                    }
+                }
+            }
+        }
+
+        impl<T> From<[$elem_ty; $elem_count]> for $id<T> {
+            #[inline]
+            fn from(array: [$elem_ty; $elem_count]) -> Self {
+                unsafe {
+                    // FIXME: unnecessary zeroing; better than UB.
+                    let mut u: Self = crate::mem::zeroed();
+                    crate::ptr::copy_nonoverlapping(
+                        &array as *const [$elem_ty; $elem_count] as *const u8,
+                        &mut u as *mut Self as *mut u8,
+                        crate::mem::size_of::<Self>()
+                    );
+                    u
+                }
+            }
+        }
+        impl<T> Into<[$elem_ty; $elem_count]> for $id<T> {
+            #[inline]
+            fn into(self) -> [$elem_ty; $elem_count] {
+                unsafe {
+                    // FIXME: unnecessary zeroing; better than UB.
+                    let mut u: [$elem_ty; $elem_count] = crate::mem::zeroed();
+                    crate::ptr::copy_nonoverlapping(
+                        &self as *const $id<T> as *const u8,
+                        &mut u as *mut [$elem_ty; $elem_count] as *mut u8,
+                        crate::mem::size_of::<Self>()
+                    );
+                    u
+                }
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _from>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn array() {
+                        let values = [1_i32; $elem_count];
+
+                        let mut vec: $id<i32> = Default::default();
+                        let mut array = [
+                            $id::<i32>::null().extract(0); $elem_count
+                        ];
+
+                        for i in 0..$elem_count {
+                            let ptr = unsafe {
+                                crate::mem::transmute(
+                                    &values[i] as *const i32
+                                )
+                            };
+                            vec = vec.replace(i, ptr);
+                            array[i] = ptr;
+                        }
+
+                        // FIXME: there is no impl of From<$id<T>> for [$elem_ty; N]
+                        // let a0 = From::from(vec);
+                        // assert_eq!(a0, array);
+                        #[allow(unused_assignments)]
+                        let mut a1 = array;
+                        a1 = vec.into();
+                        assert_eq!(a1, array);
+
+                        let v0: $id<i32> = From::from(array);
+                        assert_eq!(v0, vec);
+                        let v1: $id<i32> = array.into();
+                        assert_eq!(v1, vec);
+                    }
+                }
+            }
+        }
+
+        impl<T> $id<T> {
+            /// Instantiates a new vector with the values of the `slice`.
+            ///
+            /// # Panics
+            ///
+            /// If `slice.len() < Self::lanes()` or `&slice[0]` is not aligned
+            /// to an `align_of::<Self>()` boundary.
+            #[inline]
+            pub fn from_slice_aligned(slice: &[$elem_ty]) -> Self {
+                unsafe {
+                    assert!(slice.len() >= $elem_count);
+                    let target_ptr = slice.get_unchecked(0) as *const $elem_ty;
+                    assert!(
+                        target_ptr.align_offset(crate::mem::align_of::<Self>())
+                            == 0
+                    );
+                    Self::from_slice_aligned_unchecked(slice)
+                }
+            }
+
+            /// Instantiates a new vector with the values of the `slice`.
+            ///
+            /// # Panics
+            ///
+            /// If `slice.len() < Self::lanes()`.
+            #[inline]
+            pub fn from_slice_unaligned(slice: &[$elem_ty]) -> Self {
+                unsafe {
+                    assert!(slice.len() >= $elem_count);
+                    Self::from_slice_unaligned_unchecked(slice)
+                }
+            }
+
+            /// Instantiates a new vector with the values of the `slice`.
+            ///
+            /// # Precondition
+            ///
+            /// If `slice.len() < Self::lanes()` or `&slice[0]` is not aligned
+            /// to an `align_of::<Self>()` boundary, the behavior is undefined.
+            #[inline]
+            pub unsafe fn from_slice_aligned_unchecked(slice: &[$elem_ty])
+                                                       -> Self {
+                #[allow(clippy::cast_ptr_alignment)]
+                *(slice.get_unchecked(0) as *const $elem_ty as *const Self)
+            }
+
+            /// Instantiates a new vector with the values of the `slice`.
+            ///
+            /// # Precondition
+            ///
+            /// If `slice.len() < Self::lanes()` the behavior is undefined.
+            #[inline]
+            pub unsafe fn from_slice_unaligned_unchecked(
+                slice: &[$elem_ty],
+            ) -> Self {
+                use crate::mem::size_of;
+                let target_ptr =
+                    slice.get_unchecked(0) as *const $elem_ty as *const u8;
+                let mut x = Self::splat(crate::ptr::null_mut() as $elem_ty);
+                let self_ptr = &mut x as *mut Self as *mut u8;
+                crate::ptr::copy_nonoverlapping(
+                    target_ptr,
+                    self_ptr,
+                    size_of::<Self>(),
+                );
+                x
+            }
+        }
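+
+        // Sketch (not part of the original source), again assuming `cptrx4<T>`:
+        // `from_slice_unaligned` only checks the slice length, while the
+        // aligned variant additionally asserts the alignment of `&slice[0]`.
+        //
+        // ```rust
+        // use packed_simd::cptrx4;
+        //
+        // let x = 1_i32;
+        // let slice = [&x as *const i32; 4];
+        // let v = cptrx4::<i32>::from_slice_unaligned(&slice);
+        // assert_eq!(v.extract(3), &x as *const i32);
+        // ```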
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _slice_from_slice>] {
+                    use super::*;
+                    use crate::iter::Iterator;
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn from_slice_unaligned() {
+                        let (null, non_null) = ptr_vals!($id<i32>);
+
+                        let mut unaligned = [
+                            non_null; $id::<i32>::lanes() + 1
+                        ];
+                        unaligned[0] = null;
+                        let vec = $id::<i32>::from_slice_unaligned(
+                            &unaligned[1..]
+                        );
+                        for (index, &b) in unaligned.iter().enumerate() {
+                            if index == 0 {
+                                assert_eq!(b, null);
+                            } else {
+                                assert_eq!(b, non_null);
+                                assert_eq!(b, vec.extract(index - 1));
+                            }
+                        }
+                    }
+
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn from_slice_unaligned_fail() {
+                        let (_null, non_null) = ptr_vals!($id<i32>);
+                        let unaligned = [non_null; $id::<i32>::lanes() + 1];
+                        // the slice is not large enough => panic
+                        let _vec = $id::<i32>::from_slice_unaligned(
+                            &unaligned[2..]
+                        );
+                    }
+
+                    union A {
+                        data: [<$id<i32> as sealed::Simd>::Element;
+                               2 * $id::<i32>::lanes()],
+                        _vec: $id<i32>,
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn from_slice_aligned() {
+                        let (null, non_null) = ptr_vals!($id<i32>);
+                        let mut aligned = A {
+                            data: [null; 2 * $id::<i32>::lanes()],
+                        };
+                        for i in
+                            $id::<i32>::lanes()..(2 * $id::<i32>::lanes()) {
+                            unsafe {
+                                aligned.data[i] = non_null;
+                            }
+                        }
+
+                        let vec = unsafe {
+                            $id::<i32>::from_slice_aligned(
+                                &aligned.data[$id::<i32>::lanes()..]
+                            )
+                        };
+                        for (index, &b) in unsafe {
+                            aligned.data.iter().enumerate()
+                        } {
+                            if index < $id::<i32>::lanes() {
+                                assert_eq!(b, null);
+                            } else {
+                                assert_eq!(b, non_null);
+                                assert_eq!(
+                                    b, vec.extract(index - $id::<i32>::lanes())
+                                );
+                            }
+                        }
+                    }
+
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn from_slice_aligned_fail_lanes() {
+                        let (_null, non_null) = ptr_vals!($id<i32>);
+                        let aligned = A {
+                            data: [non_null; 2 * $id::<i32>::lanes()],
+                        };
+                        // the slice is not large enough => panic
+                        let _vec = unsafe {
+                            $id::<i32>::from_slice_aligned(
+                                &aligned.data[2 * $id::<i32>::lanes()..]
+                            )
+                        };
+                    }
+
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn from_slice_aligned_fail_align() {
+                        unsafe {
+                            let (null, _non_null) = ptr_vals!($id<i32>);
+                            let aligned = A {
+                                data: [null; 2 * $id::<i32>::lanes()],
+                            };
+
+                            // get a pointer to the front of data
+                            let ptr = aligned.data.as_ptr();
+                            // offset pointer by one element
+                            let ptr = ptr.wrapping_add(1);
+
+                            if ptr.align_offset(
+                                crate::mem::align_of::<$id<i32>>()
+                            ) == 0 {
+                                // the pointer is properly aligned, so
+                                // from_slice_aligned won't fail here (e.g. this
+                                // can happen for i128x1). So we panic to make
+                                // the "should_fail" test pass:
+                                panic!("ok");
+                            }
+
+                            // create a slice - this is safe, because the
+                            // elements of the slice exist, are properly
+                            // initialized, and properly aligned:
+                            let s = slice::from_raw_parts(
+                                ptr, $id::<i32>::lanes()
+                            );
+                            // this should always panic because the slice
+                            // alignment does not match the alignment
+                            // requirements for the vector type:
+                            let _vec = $id::<i32>::from_slice_aligned(s);
+                        }
+                    }
+                }
+            }
+        }
+
+        impl<T> $id<T> {
+            /// Writes the values of the vector to the `slice`.
+            ///
+            /// # Panics
+            ///
+            /// If `slice.len() < Self::lanes()` or `&slice[0]` is not
+            /// aligned to an `align_of::<Self>()` boundary.
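+            ///
+            /// # Examples
+            ///
+            /// A minimal sketch; `cptrx4` stands in for one hypothetical
+            /// instantiation of this macro (a vector of four `*const T`):
+            ///
+            /// ```ignore
+            /// let x = 0_i32;
+            /// // the destination must be aligned for the vector type, e.g.
+            /// // via a #[repr(align)] wrapper (32 bytes assumed here):
+            /// #[repr(align(32))]
+            /// struct Aligned([*const i32; 4]);
+            /// let mut buf = Aligned([core::ptr::null(); 4]);
+            /// cptrx4::<i32>::splat(&x as *const i32)
+            ///     .write_to_slice_aligned(&mut buf.0);
+            /// assert_eq!(buf.0, [&x as *const i32; 4]);
+            /// ```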
+            #[inline]
+            pub fn write_to_slice_aligned(self, slice: &mut [$elem_ty]) {
+                unsafe {
+                    assert!(slice.len() >= $elem_count);
+                    let target_ptr =
+                        slice.get_unchecked_mut(0) as *mut $elem_ty;
+                    assert!(
+                        target_ptr.align_offset(crate::mem::align_of::<Self>())
+                            == 0
+                    );
+                    self.write_to_slice_aligned_unchecked(slice);
+                }
+            }
+
+            /// Writes the values of the vector to the `slice`.
+            ///
+            /// # Panics
+            ///
+            /// If `slice.len() < Self::lanes()`.
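+            ///
+            /// # Examples
+            ///
+            /// A minimal sketch with the same hypothetical `cptrx4`
+            /// instantiation; no alignment requirement beyond the element's:
+            ///
+            /// ```ignore
+            /// let x = 0_i32;
+            /// let mut buf = [core::ptr::null::<i32>(); 4];
+            /// cptrx4::<i32>::splat(&x as *const i32)
+            ///     .write_to_slice_unaligned(&mut buf);
+            /// assert_eq!(buf, [&x as *const i32; 4]);
+            /// ```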
+            #[inline]
+            pub fn write_to_slice_unaligned(self, slice: &mut [$elem_ty]) {
+                unsafe {
+                    assert!(slice.len() >= $elem_count);
+                    self.write_to_slice_unaligned_unchecked(slice);
+                }
+            }
+
+            /// Writes the values of the vector to the `slice`.
+            ///
+            /// # Precondition
+            ///
+            /// If `slice.len() < Self::lanes()` or `&slice[0]` is not
+            /// aligned to an `align_of::<Self>()` boundary, the behavior is
+            /// undefined.
+            #[inline]
+            pub unsafe fn write_to_slice_aligned_unchecked(
+                self, slice: &mut [$elem_ty],
+            ) {
+                #[allow(clippy::cast_ptr_alignment)]
+                *(slice.get_unchecked_mut(0) as *mut $elem_ty as *mut Self) =
+                    self;
+            }
+
+            /// Writes the values of the vector to the `slice`.
+            ///
+            /// # Precondition
+            ///
+            /// If `slice.len() < Self::lanes()` the behavior is undefined.
+            #[inline]
+            pub unsafe fn write_to_slice_unaligned_unchecked(
+                self, slice: &mut [$elem_ty],
+            ) {
+                let target_ptr =
+                    slice.get_unchecked_mut(0) as *mut $elem_ty as *mut u8;
+                let self_ptr = &self as *const Self as *const u8;
+                crate::ptr::copy_nonoverlapping(
+                    self_ptr,
+                    target_ptr,
+                    crate::mem::size_of::<Self>(),
+                );
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _slice_write_to_slice>] {
+                    use super::*;
+                    use crate::iter::Iterator;
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn write_to_slice_unaligned() {
+                        let (null, non_null) = ptr_vals!($id<i32>);
+                        let mut unaligned = [null; $id::<i32>::lanes() + 1];
+                        let vec = $id::<i32>::splat(non_null);
+                        vec.write_to_slice_unaligned(&mut unaligned[1..]);
+                        for (index, &b) in unaligned.iter().enumerate() {
+                            if index == 0 {
+                                assert_eq!(b, null);
+                            } else {
+                                assert_eq!(b, non_null);
+                                assert_eq!(b, vec.extract(index - 1));
+                            }
+                        }
+                    }
+
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn write_to_slice_unaligned_fail() {
+                        let (null, non_null) = ptr_vals!($id<i32>);
+                        let mut unaligned = [null; $id::<i32>::lanes() + 1];
+                        let vec = $id::<i32>::splat(non_null);
+                        // the slice is not large enough => panic
+                        vec.write_to_slice_unaligned(&mut unaligned[2..]);
+                    }
+
+                    union A {
+                        data: [<$id<i32> as sealed::Simd>::Element;
+                               2 * $id::<i32>::lanes()],
+                        _vec: $id<i32>,
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn write_to_slice_aligned() {
+                        let (null, non_null) = ptr_vals!($id<i32>);
+                        let mut aligned = A {
+                            data: [null; 2 * $id::<i32>::lanes()],
+                        };
+                        let vec = $id::<i32>::splat(non_null);
+                        unsafe {
+                            vec.write_to_slice_aligned(
+                                &mut aligned.data[$id::<i32>::lanes()..]
+                            )
+                        };
+                        for (index, &b) in
+                            unsafe { aligned.data.iter().enumerate() } {
+                            if index < $id::<i32>::lanes() {
+                                assert_eq!(b, null);
+                            } else {
+                                assert_eq!(b, non_null);
+                                assert_eq!(
+                                    b, vec.extract(index - $id::<i32>::lanes())
+                                );
+                            }
+                        }
+                    }
+
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn write_to_slice_aligned_fail_lanes() {
+                        let (null, non_null) = ptr_vals!($id<i32>);
+                        let mut aligned = A {
+                            data: [null; 2 * $id::<i32>::lanes()],
+                        };
+                        let vec = $id::<i32>::splat(non_null);
+                        // the slice is not large enough => panic
+                        unsafe {
+                            vec.write_to_slice_aligned(
+                                &mut aligned.data[2 * $id::<i32>::lanes()..]
+                            )
+                        };
+                    }
+
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn write_to_slice_aligned_fail_align() {
+                        let (null, non_null) = ptr_vals!($id<i32>);
+                        unsafe {
+                            let mut aligned = A {
+                                data: [null; 2 * $id::<i32>::lanes()],
+                            };
+
+                            // get a pointer to the front of data
+                            let ptr = aligned.data.as_mut_ptr();
+                            // offset pointer by one element
+                            let ptr = ptr.wrapping_add(1);
+
+                            if ptr.align_offset(
+                                crate::mem::align_of::<$id<i32>>()
+                            ) == 0 {
+                                // the pointer is properly aligned, so
+                                // write_to_slice_aligned won't fail here (e.g.
+                                // this can happen for i128x1). So we panic to
+                                // make the #[should_panic] test pass:
+                                panic!("ok");
+                            }
+
+                            // create a slice - this is safe, because the
+                            // elements of the slice exist, are properly
+                            // initialized, and properly aligned:
+                            let s = slice::from_raw_parts_mut(
+                                ptr, $id::<i32>::lanes()
+                            );
+                            // this should always panic because the slice
+                            // alignment does not match the alignment
+                            // requirements for the vector type:
+                            let vec = $id::<i32>::splat(non_null);
+                            vec.write_to_slice_aligned(s);
+                        }
+                    }
+                }
+            }
+        }
+
+        impl<T> crate::hash::Hash for $id<T> {
+            #[inline]
+            fn hash<H: crate::hash::Hasher>(&self, state: &mut H) {
+                let s: $usize_ty = unsafe { crate::mem::transmute(*self) };
+                s.hash(state)
+            }
+        }
+
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _hash>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn hash() {
+                        use crate::hash::{Hash, Hasher};
+                        #[allow(deprecated)]
+                        use crate::hash::{SipHasher13};
+
+                        let values = [1_i32; $elem_count];
+
+                        let mut vec: $id<i32> = Default::default();
+                        let mut array = [
+                            $id::<i32>::null().extract(0);
+                            $elem_count
+                        ];
+
+                        for i in 0..$elem_count {
+                            let ptr = unsafe {
+                                crate::mem::transmute(
+                                    &values[i] as *const i32
+                                )
+                            };
+                            vec = vec.replace(i, ptr);
+                            array[i] = ptr;
+                        }
+
+                        #[allow(deprecated)]
+                        let mut a_hash = SipHasher13::new();
+                        let mut v_hash = a_hash.clone();
+                        array.hash(&mut a_hash);
+                        vec.hash(&mut v_hash);
+                        assert_eq!(a_hash.finish(), v_hash.finish());
+                    }
+                }
+            }
+        }
+
+        impl<T> $id<T> {
+            /// Calculates the offset from a pointer.
+            ///
+            /// `count` is in units of `T`; e.g. a count of `3` represents a
+            /// pointer offset of `3 * size_of::<T>()` bytes.
+            ///
+            /// # Safety
+            ///
+            /// If any of the following conditions are violated, the result is
+            /// Undefined Behavior:
+            ///
+            /// * Both the starting and resulting pointer must be either in
+            /// bounds or one byte past the end of an allocated object.
+            ///
+            /// * The computed offset, in bytes, cannot overflow an `isize`.
+            ///
+            /// * The offset being in bounds cannot rely on "wrapping around"
+            /// the address space. That is, the infinite-precision sum, in
+            /// bytes, must fit in a `usize`.
+            ///
+            /// The compiler and standard library generally try to ensure
+            /// allocations never reach a size where an offset is a concern. For
+            /// instance, `Vec` and `Box` ensure they never allocate more than
+            /// `isize::MAX` bytes, so `vec.as_ptr().offset(vec.len() as isize)`
+            /// is always safe.
+            ///
+            /// Most platforms fundamentally can't even construct such an
+            /// allocation. For instance, no known 64-bit platform can ever
+            /// serve a request for 2<sup>63</sup> bytes due to page-table
+            /// limitations or splitting the address space. However, some
+            /// 32-bit and 16-bit platforms may successfully serve a request
+            /// for more than
+            /// splitting the address space. However, some 32-bit and 16-bit
+            /// platforms may successfully serve a request for more than
+            /// `isize::MAX` bytes with things like Physical Address Extension.
+            /// As such, memory acquired directly from allocators or memory
+            /// mapped files may be too large to handle with this function.
+            ///
+            /// Consider using `wrapping_offset` instead if these constraints
+            /// are difficult to satisfy. The only advantage of this method is
+            /// that it enables more aggressive compiler optimizations.
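+            ///
+            /// # Examples
+            ///
+            /// A minimal sketch; `cptrx4` / `isizex4` stand in for
+            /// hypothetical instantiations of this macro:
+            ///
+            /// ```ignore
+            /// let a = [0_i32; 8];
+            /// let v = cptrx4::<i32>::splat(a.as_ptr());
+            /// // offset every lane by 3 elements (3 * size_of::<i32>() bytes):
+            /// let w = unsafe { v.offset(isizex4::splat(3)) };
+            /// assert_eq!(w.extract(0), unsafe { a.as_ptr().offset(3) });
+            /// ```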
+            #[inline]
+            pub unsafe fn offset(self, count: $isize_ty) -> Self {
+                // FIXME: should use LLVM's `add nsw nuw`
+                self.wrapping_offset(count)
+            }
+
+            /// Calculates the offset from a pointer using wrapping arithmetic.
+            ///
+            /// `count` is in units of `T`; e.g. a count of `3` represents a
+            /// pointer offset of `3 * size_of::<T>()` bytes.
+            ///
+            /// # Safety
+            ///
+            /// The resulting pointer does not need to be in bounds, but it is
+            /// potentially hazardous to dereference (which requires unsafe).
+            ///
+            /// Always use `.offset(count)` instead when possible, because
+            /// offset allows the compiler to optimize better.
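+            ///
+            /// # Examples
+            ///
+            /// A minimal sketch with the hypothetical `cptrx4` / `isizex4`
+            /// instantiations; the result may be computed even when it would
+            /// be out of bounds:
+            ///
+            /// ```ignore
+            /// let a = [0_i32; 4];
+            /// let v = cptrx4::<i32>::splat(a.as_ptr());
+            /// // safe to compute, but hazardous to dereference:
+            /// let _w = v.wrapping_offset(isizex4::splat(100));
+            /// ```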
+            #[inline]
+            pub fn wrapping_offset(self, count: $isize_ty) -> Self {
+                unsafe {
+                    let x: $isize_ty = crate::mem::transmute(self);
+                    // note: {+,*} currently perform `wrapping_{add, mul}`
+                    crate::mem::transmute(
+                        x + (count * crate::mem::size_of::<T>() as isize)
+                    )
+                }
+            }
+
+            /// Calculates the distance between two pointers.
+            ///
+            /// The returned value is in units of `T`: the distance in bytes is
+            /// divided by `mem::size_of::<T>()`.
+            ///
+            /// This function is the inverse of `offset`.
+            ///
+            /// # Safety
+            ///
+            /// If any of the following conditions are violated, the result is
+            /// Undefined Behavior:
+            ///
+            /// * Both the starting and other pointer must be either in bounds
+            /// or one byte past the end of the same allocated object.
+            ///
+            /// * The distance between the pointers, in bytes, cannot overflow
+            /// an `isize`.
+            ///
+            /// * The distance between the pointers, in bytes, must be an exact
+            /// multiple of the size of `T`.
+            ///
+            /// * The distance being in bounds cannot rely on "wrapping around"
+            /// the address space.
+            ///
+            /// The compiler and standard library generally try to ensure
+            /// allocations never reach a size where an offset is a concern. For
+            /// instance, `Vec` and `Box` ensure they never allocate more than
+            /// `isize::MAX` bytes, so `ptr_into_vec.offset_from(vec.as_ptr())`
+            /// is always safe.
+            ///
+            /// Most platforms fundamentally can't even construct such an
+            /// allocation. For instance, no known 64-bit platform can ever
+            /// serve a request for 2<sup>63</sup> bytes due to page-table
+            /// limitations or splitting the address space. However, some
+            /// 32-bit and 16-bit platforms may successfully serve a request
+            /// for more than
+            /// splitting the address space. However, some 32-bit and 16-bit
+            /// platforms may successfully serve a request for more than
+            /// `isize::MAX` bytes with things like Physical Address Extension.
+            /// As such, memory acquired directly from allocators or memory
+            /// mapped files may be too large to handle with this function.
+            ///
+            /// Consider using `wrapping_offset_from` instead if these constraints
+            /// are difficult to satisfy. The only advantage of this method is
+            /// that it enables more aggressive compiler optimizations.
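+            ///
+            /// # Examples
+            ///
+            /// A minimal sketch with the hypothetical `cptrx4` / `usizex4` /
+            /// `isizex4` instantiations:
+            ///
+            /// ```ignore
+            /// let a = [0_i32; 8];
+            /// let base = cptrx4::<i32>::splat(a.as_ptr());
+            /// let fourth = unsafe { base.add(usizex4::splat(4)) };
+            /// assert_eq!(unsafe { fourth.offset_from(base) },
+            ///            isizex4::splat(4));
+            /// ```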
+            #[inline]
+            pub unsafe fn offset_from(self, origin: Self) -> $isize_ty {
+                // FIXME: should use LLVM's `sub nsw nuw`.
+                self.wrapping_offset_from(origin)
+            }
+
+            /// Calculates the distance between two pointers.
+            ///
+            /// The returned value is in units of `T`: the distance in bytes is
+            /// divided by `mem::size_of::<T>()`.
+            ///
+            /// If the address difference between the two pointers is not a
+            /// multiple of `mem::size_of::<T>()` then the result of the
+            /// division is rounded towards zero.
+            ///
+            /// Though this method is safe for any two pointers, note that its
+            /// result will be mostly useless if the two pointers aren't into
+            /// the same allocated object, for example if they point to two
+            /// different local variables.
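+            ///
+            /// # Examples
+            ///
+            /// A minimal sketch with the hypothetical `cptrx4` / `usizex4` /
+            /// `isizex4` instantiations:
+            ///
+            /// ```ignore
+            /// let a = [0_i32; 8];
+            /// let base = cptrx4::<i32>::splat(a.as_ptr());
+            /// let end = base.wrapping_add(usizex4::splat(8));
+            /// assert_eq!(end.wrapping_offset_from(base), isizex4::splat(8));
+            /// ```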
+            #[inline]
+            pub fn wrapping_offset_from(self, origin: Self) -> $isize_ty {
+                let x: $isize_ty = unsafe { crate::mem::transmute(self) };
+                let y: $isize_ty = unsafe { crate::mem::transmute(origin) };
+                // note: {-,/} currently perform wrapping_{sub, div}
+                (x - y) / (crate::mem::size_of::<T>() as isize)
+            }
+
+            /// Calculates the offset from a pointer (convenience for
+            /// `.offset(count as isize)`).
+            ///
+            /// `count` is in units of `T`; e.g. a count of 3 represents a
+            /// pointer offset of `3 * size_of::<T>()` bytes.
+            ///
+            /// # Safety
+            ///
+            /// If any of the following conditions are violated, the result is
+            /// Undefined Behavior:
+            ///
+            /// * Both the starting and resulting pointer must be either in
+            /// bounds or one byte past the end of an allocated object.
+            ///
+            /// * The computed offset, in bytes, cannot overflow an `isize`.
+            ///
+            /// * The offset being in bounds cannot rely on "wrapping around"
+            /// the address space. That is, the infinite-precision sum must fit
+            /// in a `usize`.
+            ///
+            /// The compiler and standard library generally try to ensure
+            /// allocations never reach a size where an offset is a concern. For
+            /// instance, `Vec` and `Box` ensure they never allocate more than
+            /// `isize::MAX` bytes, so `vec.as_ptr().add(vec.len())` is always
+            /// safe.
+            ///
+            /// Most platforms fundamentally can't even construct such an
+            /// allocation. For instance, no known 64-bit platform can ever
+            /// serve a request for 2<sup>63</sup> bytes due to page-table
+            /// limitations or splitting the address space. However, some
+            /// 32-bit and 16-bit platforms may successfully serve a request
+            /// for more than
+            /// splitting the address space. However, some 32-bit and 16-bit
+            /// platforms may successfully serve a request for more than
+            /// `isize::MAX` bytes with things like Physical Address Extension.
+            /// As such, memory acquired directly from allocators or memory
+            /// mapped files may be too large to handle with this function.
+            ///
+            /// Consider using `wrapping_add` instead if these constraints
+            /// are difficult to satisfy. The only advantage of this method is
+            /// that it enables more aggressive compiler optimizations.
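+            ///
+            /// # Examples
+            ///
+            /// A minimal sketch with the hypothetical `cptrx4` / `usizex4`
+            /// instantiations:
+            ///
+            /// ```ignore
+            /// let a = [0_i32; 8];
+            /// let v = cptrx4::<i32>::splat(a.as_ptr());
+            /// let w = unsafe { v.add(usizex4::splat(2)) };
+            /// assert_eq!(w.extract(0), unsafe { a.as_ptr().add(2) });
+            /// ```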
+            #[inline]
+            #[allow(clippy::should_implement_trait)]
+            pub unsafe fn add(self, count: $usize_ty) -> Self {
+                self.offset(count.cast())
+            }
+
+            /// Calculates the offset from a pointer (convenience for
+            /// `.offset((count as isize).wrapping_neg())`).
+            ///
+            /// `count` is in units of T; e.g. a `count` of 3 represents a
+            /// pointer offset of `3 * size_of::<T>()` bytes.
+            ///
+            /// # Safety
+            ///
+            /// If any of the following conditions are violated, the result is
+            /// Undefined Behavior:
+            ///
+            /// * Both the starting and resulting pointer must be either in
+            /// bounds or one byte past the end of an allocated object.
+            ///
+            /// * The computed offset cannot exceed `isize::MAX` **bytes**.
+            ///
+            /// * The offset being in bounds cannot rely on "wrapping around"
+            /// the address space. That is, the infinite-precision sum must fit
+            /// in a `usize`.
+            ///
+            /// The compiler and standard library generally try to ensure
+            /// allocations never reach a size where an offset is a concern. For
+            /// instance, `Vec` and `Box` ensure they never allocate more than
+            /// `isize::MAX` bytes, so
+            /// `vec.as_ptr().add(vec.len()).sub(vec.len())` is always safe.
+            ///
+            /// Most platforms fundamentally can't even construct such an
+            /// allocation. For instance, no known 64-bit platform can ever
+            /// serve a request for 2<sup>63</sup> bytes due to page-table
+            /// limitations or splitting the address space. However, some 32-bit
+            /// and 16-bit platforms may successfully serve a request for more
+            /// than `isize::MAX` bytes with things like Physical Address
+            /// Extension. As such, memory acquired directly from allocators or
+            /// memory mapped files *may* be too large to handle with this
+            /// function.
+            ///
+            /// Consider using `wrapping_sub` instead if these constraints
+            /// are difficult to satisfy. The only advantage of this method is
+            /// that it enables more aggressive compiler optimizations.
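+            ///
+            /// # Examples
+            ///
+            /// A minimal sketch with the hypothetical `cptrx4` / `usizex4`
+            /// instantiations:
+            ///
+            /// ```ignore
+            /// let a = [0_i32; 8];
+            /// let end = cptrx4::<i32>::splat(unsafe { a.as_ptr().add(8) });
+            /// let mid = unsafe { end.sub(usizex4::splat(4)) };
+            /// assert_eq!(mid.extract(0), unsafe { a.as_ptr().add(4) });
+            /// ```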
+            #[inline]
+            #[allow(clippy::should_implement_trait)]
+            pub unsafe fn sub(self, count: $usize_ty) -> Self {
+                let x: $isize_ty = count.cast();
+                // note: - is currently wrapping_neg
+                self.offset(-x)
+            }
+
+            /// Calculates the offset from a pointer using wrapping arithmetic.
+            /// (convenience for `.wrapping_offset(count as isize)`)
+            ///
+            /// `count` is in units of T; e.g. a `count` of 3 represents a
+            /// pointer offset of `3 * size_of::<T>()` bytes.
+            ///
+            /// # Safety
+            ///
+            /// The resulting pointer does not need to be in bounds, but it is
+            /// potentially hazardous to dereference (which requires `unsafe`).
+            ///
+            /// Always use `.add(count)` instead when possible, because `add`
+            /// allows the compiler to optimize better.
+            #[inline]
+            pub fn wrapping_add(self, count: $usize_ty) -> Self {
+                self.wrapping_offset(count.cast())
+            }
+
+            /// Calculates the offset from a pointer using wrapping arithmetic.
+            /// (convenience for `.wrapping_offset((count as
+            /// isize).wrapping_neg())`)
+            ///
+            /// `count` is in units of T; e.g. a `count` of 3 represents a
+            /// pointer offset of `3 * size_of::<T>()` bytes.
+            ///
+            /// # Safety
+            ///
+            /// The resulting pointer does not need to be in bounds, but it is
+            /// potentially hazardous to dereference (which requires `unsafe`).
+            ///
+            /// Always use `.sub(count)` instead when possible, because `sub`
+            /// allows the compiler to optimize better.
+            #[inline]
+            pub fn wrapping_sub(self, count: $usize_ty) -> Self {
+                let x: $isize_ty = count.cast();
+                self.wrapping_offset(-x)
+            }
+        }
+
+        impl<T> $id<T> {
+            /// Shuffle vector elements according to `indices`.
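+            ///
+            /// # Examples
+            ///
+            /// A minimal sketch; `cptrx4` is a hypothetical instantiation and
+            /// its associated `Indices` type is assumed to be `usizex4`. Lane
+            /// `i` of the result is lane `indices.extract(i)` of `self`:
+            ///
+            /// ```ignore
+            /// let (a, b) = (0_i32, 1_i32);
+            /// let v = cptrx4::<i32>::splat(&a as *const i32)
+            ///     .replace(3, &b as *const i32);
+            /// // broadcast lane 3 into every lane:
+            /// let r = v.shuffle1_dyn(usizex4::splat(3));
+            /// assert_eq!(r, cptrx4::<i32>::splat(&b as *const i32));
+            /// ```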
+            #[inline]
+            pub fn shuffle1_dyn<I>(self, indices: I) -> Self
+                where
+                Self: codegen::shuffle1_dyn::Shuffle1Dyn<Indices = I>,
+            {
+                codegen::shuffle1_dyn::Shuffle1Dyn::shuffle1_dyn(self, indices)
+            }
+        }
+
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _shuffle1_dyn>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn shuffle1_dyn() {
+                        let (null, non_null) = ptr_vals!($id<i32>);
+
+                        // alternating = [non_null, null, non_null, null, ...]
+                        let mut alternating = $id::<i32>::splat(null);
+                        for i in 0..$id::<i32>::lanes() {
+                            if i % 2 == 0 {
+                                alternating = alternating.replace(i, non_null);
+                            }
+                        }
+
+                        type Indices = <$id<i32>
+                            as codegen::shuffle1_dyn::Shuffle1Dyn>::Indices;
+                        // even = [0, 0, 2, 2, 4, 4, ..]
+                        let even = {
+                            let mut v = Indices::splat(0);
+                            for i in 0..$id::<i32>::lanes() {
+                                if i % 2 == 0 {
+                                    v = v.replace(i, (i as u8).into());
+                                } else {
+                                    v = v.replace(i, (i as u8 - 1).into());
+                                }
+                            }
+                            v
+                        };
+                        // odd = [1, 1, 3, 3, 5, 5, ...]
+                        let odd = {
+                            let mut v = Indices::splat(0);
+                            for i in 0..$id::<i32>::lanes() {
+                                if i % 2 != 0 {
+                                    v = v.replace(i, (i as u8).into());
+                                } else {
+                                    v = v.replace(i, (i as u8 + 1).into());
+                                }
+                            }
+                            v
+                        };
+
+                        assert_eq!(
+                            alternating.shuffle1_dyn(even),
+                            $id::<i32>::splat(non_null)
+                        );
+                        if $id::<i32>::lanes() > 1 {
+                            assert_eq!(
+                                alternating.shuffle1_dyn(odd),
+                                $id::<i32>::splat(null)
+                            );
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/ops.rs.html b/src/packed_simd/api/ops.rs.html new file mode 100644 index 000000000..ffb79a56d --- /dev/null +++ b/src/packed_simd/api/ops.rs.html @@ -0,0 +1,67 @@ +ops.rs.html -- source
+//! Implementation of the `ops` traits
+#[macro_use]
+mod vector_mask_bitwise;
+#[macro_use]
+mod scalar_mask_bitwise;
+
+#[macro_use]
+mod vector_arithmetic;
+#[macro_use]
+mod scalar_arithmetic;
+
+#[macro_use]
+mod vector_bitwise;
+#[macro_use]
+mod scalar_bitwise;
+
+#[macro_use]
+mod vector_shifts;
+#[macro_use]
+mod scalar_shifts;
+
+#[macro_use]
+mod vector_rotates;
+
+#[macro_use]
+mod vector_neg;
+
+#[macro_use]
+mod vector_int_min_max;
+
+#[macro_use]
+mod vector_float_min_max;
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/ops/scalar_arithmetic.rs.html b/src/packed_simd/api/ops/scalar_arithmetic.rs.html new file mode 100644 index 000000000..fed346b94 --- /dev/null +++ b/src/packed_simd/api/ops/scalar_arithmetic.rs.html @@ -0,0 +1,409 @@ +scalar_arithmetic.rs.html -- source
+//! Vertical (lane-wise) vector-scalar / scalar-vector arithmetic operations.
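+//!
+//! A minimal sketch of what these impls provide, using the `f32x4`
+//! instantiation:
+//!
+//! ```ignore
+//! let v = f32x4::splat(2.0);
+//! assert_eq!(v + 1.0, f32x4::splat(3.0)); // vector + scalar
+//! assert_eq!(1.0 + v, f32x4::splat(3.0)); // scalar + vector
+//! let mut w = v;
+//! w *= 2.0; // compound assignment with a scalar
+//! assert_eq!(w, f32x4::splat(4.0));
+//! ```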
+
+macro_rules! impl_ops_scalar_arithmetic {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl crate::ops::Add<$elem_ty> for $id {
+            type Output = Self;
+            #[inline]
+            fn add(self, other: $elem_ty) -> Self {
+                self + $id::splat(other)
+            }
+        }
+        impl crate::ops::Add<$id> for $elem_ty {
+            type Output = $id;
+            #[inline]
+            fn add(self, other: $id) -> $id {
+                $id::splat(self) + other
+            }
+        }
+
+        impl crate::ops::Sub<$elem_ty> for $id {
+            type Output = Self;
+            #[inline]
+            fn sub(self, other: $elem_ty) -> Self {
+                self - $id::splat(other)
+            }
+        }
+        impl crate::ops::Sub<$id> for $elem_ty {
+            type Output = $id;
+            #[inline]
+            fn sub(self, other: $id) -> $id {
+                $id::splat(self) - other
+            }
+        }
+
+        impl crate::ops::Mul<$elem_ty> for $id {
+            type Output = Self;
+            #[inline]
+            fn mul(self, other: $elem_ty) -> Self {
+                self * $id::splat(other)
+            }
+        }
+        impl crate::ops::Mul<$id> for $elem_ty {
+            type Output = $id;
+            #[inline]
+            fn mul(self, other: $id) -> $id {
+                $id::splat(self) * other
+            }
+        }
+
+        impl crate::ops::Div<$elem_ty> for $id {
+            type Output = Self;
+            #[inline]
+            fn div(self, other: $elem_ty) -> Self {
+                self / $id::splat(other)
+            }
+        }
+        impl crate::ops::Div<$id> for $elem_ty {
+            type Output = $id;
+            #[inline]
+            fn div(self, other: $id) -> $id {
+                $id::splat(self) / other
+            }
+        }
+
+        impl crate::ops::Rem<$elem_ty> for $id {
+            type Output = Self;
+            #[inline]
+            fn rem(self, other: $elem_ty) -> Self {
+                self % $id::splat(other)
+            }
+        }
+        impl crate::ops::Rem<$id> for $elem_ty {
+            type Output = $id;
+            #[inline]
+            fn rem(self, other: $id) -> $id {
+                $id::splat(self) % other
+            }
+        }
+
+        impl crate::ops::AddAssign<$elem_ty> for $id {
+            #[inline]
+            fn add_assign(&mut self, other: $elem_ty) {
+                *self = *self + other;
+            }
+        }
+
+        impl crate::ops::SubAssign<$elem_ty> for $id {
+            #[inline]
+            fn sub_assign(&mut self, other: $elem_ty) {
+                *self = *self - other;
+            }
+        }
+
+        impl crate::ops::MulAssign<$elem_ty> for $id {
+            #[inline]
+            fn mul_assign(&mut self, other: $elem_ty) {
+                *self = *self * other;
+            }
+        }
+
+        impl crate::ops::DivAssign<$elem_ty> for $id {
+            #[inline]
+            fn div_assign(&mut self, other: $elem_ty) {
+                *self = *self / other;
+            }
+        }
+
+        impl crate::ops::RemAssign<$elem_ty> for $id {
+            #[inline]
+            fn rem_assign(&mut self, other: $elem_ty) {
+                *self = *self % other;
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _ops_scalar_arith>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn ops_scalar_arithmetic() {
+                        let zi = 0 as $elem_ty;
+                        let oi = 1 as $elem_ty;
+                        let ti = 2 as $elem_ty;
+                        let fi = 4 as $elem_ty;
+                        let z = $id::splat(zi);
+                        let o = $id::splat(oi);
+                        let t = $id::splat(ti);
+                        let f = $id::splat(fi);
+
+                        // add
+                        assert_eq!(zi + z, z);
+                        assert_eq!(z + zi, z);
+                        assert_eq!(oi + z, o);
+                        assert_eq!(o + zi, o);
+                        assert_eq!(ti + z, t);
+                        assert_eq!(t + zi, t);
+                        assert_eq!(ti + t, f);
+                        assert_eq!(t + ti, f);
+                        // sub
+                        assert_eq!(zi - z, z);
+                        assert_eq!(z - zi, z);
+                        assert_eq!(oi - z, o);
+                        assert_eq!(o - zi, o);
+                        assert_eq!(ti - z, t);
+                        assert_eq!(t - zi, t);
+                        assert_eq!(fi - t, t);
+                        assert_eq!(f - ti, t);
+                        assert_eq!(f - o - o, t);
+                        assert_eq!(f - oi - oi, t);
+                        // mul
+                        assert_eq!(zi * z, z);
+                        assert_eq!(z * zi, z);
+                        assert_eq!(zi * o, z);
+                        assert_eq!(z * oi, z);
+                        assert_eq!(zi * t, z);
+                        assert_eq!(z * ti, z);
+                        assert_eq!(oi * t, t);
+                        assert_eq!(o * ti, t);
+                        assert_eq!(ti * t, f);
+                        assert_eq!(t * ti, f);
+                        // div
+                        assert_eq!(zi / o, z);
+                        assert_eq!(z / oi, z);
+                        assert_eq!(ti / o, t);
+                        assert_eq!(t / oi, t);
+                        assert_eq!(fi / o, f);
+                        assert_eq!(f / oi, f);
+                        assert_eq!(ti / t, o);
+                        assert_eq!(t / ti, o);
+                        assert_eq!(fi / t, t);
+                        assert_eq!(f / ti, t);
+                        // rem
+                        assert_eq!(oi % o, z);
+                        assert_eq!(o % oi, z);
+                        assert_eq!(fi % t, z);
+                        assert_eq!(f % ti, z);
+
+                        {
+                            let mut v = z;
+                            assert_eq!(v, z);
+                            v += oi; // add_assign
+                            assert_eq!(v, o);
+                            v -= oi; // sub_assign
+                            assert_eq!(v, z);
+                            v = t;
+                            v *= oi; // mul_assign
+                            assert_eq!(v, t);
+                            v *= ti;
+                            assert_eq!(v, f);
+                            v /= oi; // div_assign
+                            assert_eq!(v, f);
+                            v /= ti;
+                            assert_eq!(v, t);
+                            v %= ti; // rem_assign
+                            assert_eq!(v, z);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/ops/scalar_bitwise.rs.html b/src/packed_simd/api/ops/scalar_bitwise.rs.html new file mode 100644 index 000000000..8d4924a11 --- /dev/null +++ b/src/packed_simd/api/ops/scalar_bitwise.rs.html @@ -0,0 +1,327 @@ +scalar_bitwise.rs.html -- source
+//! Vertical (lane-wise) vector-scalar / scalar-vector bitwise operations.
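+//!
+//! A minimal sketch, using the `u32x4` instantiation:
+//!
+//! ```ignore
+//! let v = u32x4::splat(0b1010);
+//! assert_eq!(v & 0b0110, u32x4::splat(0b0010)); // vector & scalar
+//! assert_eq!(0b0110 | v, u32x4::splat(0b1110)); // scalar | vector
+//! ```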
+
+macro_rules! impl_ops_scalar_bitwise {
+    (
+        [$elem_ty:ident; $elem_count:expr]:
+        $id:ident | $test_tt:tt |
+        ($true:expr, $false:expr)
+    ) => {
+        impl crate::ops::BitXor<$elem_ty> for $id {
+            type Output = Self;
+            #[inline]
+            fn bitxor(self, other: $elem_ty) -> Self {
+                self ^ $id::splat(other)
+            }
+        }
+        impl crate::ops::BitXor<$id> for $elem_ty {
+            type Output = $id;
+            #[inline]
+            fn bitxor(self, other: $id) -> $id {
+                $id::splat(self) ^ other
+            }
+        }
+
+        impl crate::ops::BitAnd<$elem_ty> for $id {
+            type Output = Self;
+            #[inline]
+            fn bitand(self, other: $elem_ty) -> Self {
+                self & $id::splat(other)
+            }
+        }
+        impl crate::ops::BitAnd<$id> for $elem_ty {
+            type Output = $id;
+            #[inline]
+            fn bitand(self, other: $id) -> $id {
+                $id::splat(self) & other
+            }
+        }
+
+        impl crate::ops::BitOr<$elem_ty> for $id {
+            type Output = Self;
+            #[inline]
+            fn bitor(self, other: $elem_ty) -> Self {
+                self | $id::splat(other)
+            }
+        }
+        impl crate::ops::BitOr<$id> for $elem_ty {
+            type Output = $id;
+            #[inline]
+            fn bitor(self, other: $id) -> $id {
+                $id::splat(self) | other
+            }
+        }
+
+        impl crate::ops::BitAndAssign<$elem_ty> for $id {
+            #[inline]
+            fn bitand_assign(&mut self, other: $elem_ty) {
+                *self = *self & other;
+            }
+        }
+        impl crate::ops::BitOrAssign<$elem_ty> for $id {
+            #[inline]
+            fn bitor_assign(&mut self, other: $elem_ty) {
+                *self = *self | other;
+            }
+        }
+        impl crate::ops::BitXorAssign<$elem_ty> for $id {
+            #[inline]
+            fn bitxor_assign(&mut self, other: $elem_ty) {
+                *self = *self ^ other;
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _ops_scalar_bitwise>] {
+                    use super::*;
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn ops_scalar_bitwise() {
+                        let zi = 0 as $elem_ty;
+                        let oi = 1 as $elem_ty;
+                        let ti = 2 as $elem_ty;
+                        let z = $id::splat(zi);
+                        let o = $id::splat(oi);
+                        let t = $id::splat(ti);
+
+                        // BitAnd:
+                        assert_eq!(oi & o, o);
+                        assert_eq!(o & oi, o);
+                        assert_eq!(oi & z, z);
+                        assert_eq!(o & zi, z);
+                        assert_eq!(zi & o, z);
+                        assert_eq!(z & oi, z);
+                        assert_eq!(zi & z, z);
+                        assert_eq!(z & zi, z);
+
+                        assert_eq!(ti & t, t);
+                        assert_eq!(t & ti, t);
+                        assert_eq!(ti & o, z);
+                        assert_eq!(t & oi, z);
+                        assert_eq!(oi & t, z);
+                        assert_eq!(o & ti, z);
+
+                        // BitOr:
+                        assert_eq!(oi | o, o);
+                        assert_eq!(o | oi, o);
+                        assert_eq!(oi | z, o);
+                        assert_eq!(o | zi, o);
+                        assert_eq!(zi | o, o);
+                        assert_eq!(z | oi, o);
+                        assert_eq!(zi | z, z);
+                        assert_eq!(z | zi, z);
+
+                        assert_eq!(ti | t, t);
+                        assert_eq!(t | ti, t);
+                        assert_eq!(zi | t, t);
+                        assert_eq!(z | ti, t);
+                        assert_eq!(ti | z, t);
+                        assert_eq!(t | zi, t);
+
+                        // BitXOR:
+                        assert_eq!(oi ^ o, z);
+                        assert_eq!(o ^ oi, z);
+                        assert_eq!(zi ^ z, z);
+                        assert_eq!(z ^ zi, z);
+                        assert_eq!(zi ^ o, o);
+                        assert_eq!(z ^ oi, o);
+                        assert_eq!(oi ^ z, o);
+                        assert_eq!(o ^ zi, o);
+
+                        assert_eq!(ti ^ t, z);
+                        assert_eq!(t ^ ti, z);
+                        assert_eq!(ti ^ z, t);
+                        assert_eq!(t ^ zi, t);
+                        assert_eq!(zi ^ t, t);
+                        assert_eq!(z ^ ti, t);
+
+                        {
+                            // AndAssign:
+                            let mut v = o;
+                            v &= ti;
+                            assert_eq!(v, z);
+                        }
+                        {
+                            // OrAssign:
+                            let mut v = z;
+                            v |= oi;
+                            assert_eq!(v, o);
+                        }
+                        {
+                            // XORAssign:
+                            let mut v = z;
+                            v ^= oi;
+                            assert_eq!(v, o);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/ops/scalar_mask_bitwise.rs.html b/src/packed_simd/api/ops/scalar_mask_bitwise.rs.html new file mode 100644 index 000000000..814a4c6c3 --- /dev/null +++ b/src/packed_simd/api/ops/scalar_mask_bitwise.rs.html @@ -0,0 +1,283 @@ +scalar_mask_bitwise.rs.html -- source
+//! Vertical (lane-wise) vector-scalar / scalar-vector bitwise operations for masks.
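+//!
+//! A minimal sketch, using the `m32x4` mask instantiation:
+//!
+//! ```ignore
+//! let m = m32x4::new(true, false, true, false);
+//! assert_eq!(m & true, m);  // mask & bool
+//! assert_eq!(false | m, m); // bool | mask
+//! assert_eq!(m ^ true, m32x4::new(false, true, false, true));
+//! ```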
+
+macro_rules! impl_ops_scalar_mask_bitwise {
+    (
+        [$elem_ty:ident; $elem_count:expr]:
+        $id:ident | $test_tt:tt |
+        ($true:expr, $false:expr)
+    ) => {
+        impl crate::ops::BitXor<bool> for $id {
+            type Output = Self;
+            #[inline]
+            fn bitxor(self, other: bool) -> Self {
+                self ^ $id::splat(other)
+            }
+        }
+        impl crate::ops::BitXor<$id> for bool {
+            type Output = $id;
+            #[inline]
+            fn bitxor(self, other: $id) -> $id {
+                $id::splat(self) ^ other
+            }
+        }
+
+        impl crate::ops::BitAnd<bool> for $id {
+            type Output = Self;
+            #[inline]
+            fn bitand(self, other: bool) -> Self {
+                self & $id::splat(other)
+            }
+        }
+        impl crate::ops::BitAnd<$id> for bool {
+            type Output = $id;
+            #[inline]
+            fn bitand(self, other: $id) -> $id {
+                $id::splat(self) & other
+            }
+        }
+
+        impl crate::ops::BitOr<bool> for $id {
+            type Output = Self;
+            #[inline]
+            fn bitor(self, other: bool) -> Self {
+                self | $id::splat(other)
+            }
+        }
+        impl crate::ops::BitOr<$id> for bool {
+            type Output = $id;
+            #[inline]
+            fn bitor(self, other: $id) -> $id {
+                $id::splat(self) | other
+            }
+        }
+
+        impl crate::ops::BitAndAssign<bool> for $id {
+            #[inline]
+            fn bitand_assign(&mut self, other: bool) {
+                *self = *self & other;
+            }
+        }
+        impl crate::ops::BitOrAssign<bool> for $id {
+            #[inline]
+            fn bitor_assign(&mut self, other: bool) {
+                *self = *self | other;
+            }
+        }
+        impl crate::ops::BitXorAssign<bool> for $id {
+            #[inline]
+            fn bitxor_assign(&mut self, other: bool) {
+                *self = *self ^ other;
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _ops_scalar_mask_bitwise>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn ops_scalar_mask_bitwise() {
+                        let ti = true;
+                        let fi = false;
+                        let t = $id::splat(ti);
+                        let f = $id::splat(fi);
+                        assert!(t != f);
+                        assert!(!(t == f));
+
+                        // BitAnd:
+                        assert_eq!(ti & f, f);
+                        assert_eq!(t & fi, f);
+                        assert_eq!(fi & t, f);
+                        assert_eq!(f & ti, f);
+                        assert_eq!(ti & t, t);
+                        assert_eq!(t & ti, t);
+                        assert_eq!(fi & f, f);
+                        assert_eq!(f & fi, f);
+
+                        // BitOr:
+                        assert_eq!(ti | f, t);
+                        assert_eq!(t | fi, t);
+                        assert_eq!(fi | t, t);
+                        assert_eq!(f | ti, t);
+                        assert_eq!(ti | t, t);
+                        assert_eq!(t | ti, t);
+                        assert_eq!(fi | f, f);
+                        assert_eq!(f | fi, f);
+
+                        // BitXOR:
+                        assert_eq!(ti ^ f, t);
+                        assert_eq!(t ^ fi, t);
+                        assert_eq!(fi ^ t, t);
+                        assert_eq!(f ^ ti, t);
+                        assert_eq!(ti ^ t, f);
+                        assert_eq!(t ^ ti, f);
+                        assert_eq!(fi ^ f, f);
+                        assert_eq!(f ^ fi, f);
+
+                        {
+                            // AndAssign:
+                            let mut v = f;
+                            v &= ti;
+                            assert_eq!(v, f);
+                        }
+                        {
+                            // OrAssign:
+                            let mut v = f;
+                            v |= ti;
+                            assert_eq!(v, t);
+                        }
+                        {
+                            // XORAssign:
+                            let mut v = f;
+                            v ^= ti;
+                            assert_eq!(v, t);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/ops/scalar_shifts.rs.html b/src/packed_simd/api/ops/scalar_shifts.rs.html new file mode 100644 index 000000000..ce5d0c3b4 --- /dev/null +++ b/src/packed_simd/api/ops/scalar_shifts.rs.html @@ -0,0 +1,217 @@ +scalar_shifts.rs.html -- source
+//! Vertical (lane-wise) vector-scalar shift operations.
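+//!
+//! A minimal sketch, using the `u32x4` instantiation; the `u32` shift
+//! amount applies to every lane:
+//!
+//! ```ignore
+//! let v = u32x4::splat(0b0100);
+//! assert_eq!(v >> 2, u32x4::splat(0b0001));
+//! assert_eq!(v << 1, u32x4::splat(0b1000));
+//! ```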
+
+macro_rules! impl_ops_scalar_shifts {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl crate::ops::Shl<u32> for $id {
+            type Output = Self;
+            #[inline]
+            fn shl(self, other: u32) -> Self {
+                self << $id::splat(other as $elem_ty)
+            }
+        }
+        impl crate::ops::Shr<u32> for $id {
+            type Output = Self;
+            #[inline]
+            fn shr(self, other: u32) -> Self {
+                self >> $id::splat(other as $elem_ty)
+            }
+        }
+
+        impl crate::ops::ShlAssign<u32> for $id {
+            #[inline]
+            fn shl_assign(&mut self, other: u32) {
+                *self = *self << other;
+            }
+        }
+        impl crate::ops::ShrAssign<u32> for $id {
+            #[inline]
+            fn shr_assign(&mut self, other: u32) {
+                *self = *self >> other;
+            }
+        }
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _ops_scalar_shifts>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg_attr(any(target_arch = "s390x", target_arch = "sparc64"),
+                               allow(unreachable_code,
+                                     unused_variables,
+                                     unused_mut)
+                    )]
+                    // ^^^ FIXME: https://github.com/rust-lang/rust/issues/55344
+                    fn ops_scalar_shifts() {
+                        let z = $id::splat(0 as $elem_ty);
+                        let o = $id::splat(1 as $elem_ty);
+                        let t = $id::splat(2 as $elem_ty);
+                        let f = $id::splat(4 as $elem_ty);
+
+                        {
+                            let zi = 0 as u32;
+                            let oi = 1 as u32;
+                            let ti = 2 as u32;
+                            let maxi
+                                = (mem::size_of::<$elem_ty>() * 8 - 1) as u32;
+
+                            // shr
+                            assert_eq!(z >> zi, z);
+                            assert_eq!(z >> oi, z);
+                            assert_eq!(z >> ti, z);
+                            assert_eq!(z >> ti, z);
+
+                            #[cfg(any(target_arch = "s390x", target_arch = "sparc64"))] {
+                                // FIXME: https://github.com/rust-lang-nursery/packed_simd/issues/13
+                                return;
+                            }
+
+                            assert_eq!(o >> zi, o);
+                            assert_eq!(t >> zi, t);
+                            assert_eq!(f >> zi, f);
+                            assert_eq!(f >> maxi, z);
+
+                            assert_eq!(o >> oi, z);
+                            assert_eq!(t >> oi, o);
+                            assert_eq!(t >> ti, z);
+                            assert_eq!(f >> oi, t);
+                            assert_eq!(f >> ti, o);
+                            assert_eq!(f >> maxi, z);
+
+                            // shl
+                            assert_eq!(z << zi, z);
+                            assert_eq!(o << zi, o);
+                            assert_eq!(t << zi, t);
+                            assert_eq!(f << zi, f);
+                            assert_eq!(f << maxi, z);
+
+                            assert_eq!(o << oi, t);
+                            assert_eq!(o << ti, f);
+                            assert_eq!(t << oi, f);
+
+                            {  // shr_assign
+                                let mut v = o;
+                                v >>= oi;
+                                assert_eq!(v, z);
+                            }
+                            {  // shl_assign
+                                let mut v = o;
+                                v <<= oi;
+                                assert_eq!(v, t);
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
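+
+// Usage sketch: for an integer vector type expanded by this macro, e.g. the
+// crate's `u32x4`, a scalar `u32` shift amount is splatted across all lanes:
+//
+// ```
+// use packed_simd::u32x4;
+// let v = u32x4::new(1, 2, 4, 8);
+// assert_eq!(v << 1_u32, u32x4::new(2, 4, 8, 16)); // same as `v << u32x4::splat(1)`
+// assert_eq!(v >> 2_u32, u32x4::new(0, 0, 1, 2));
+// ```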
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/ops/vector_arithmetic.rs.html b/src/packed_simd/api/ops/vector_arithmetic.rs.html new file mode 100644 index 000000000..39fd81564 --- /dev/null +++ b/src/packed_simd/api/ops/vector_arithmetic.rs.html @@ -0,0 +1,299 @@ +vector_arithmetic.rs.html -- source
+//! Vertical (lane-wise) vector-vector arithmetic operations.
+
+macro_rules! impl_ops_vector_arithmetic {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl crate::ops::Add for $id {
+            type Output = Self;
+            #[inline]
+            fn add(self, other: Self) -> Self {
+                use crate::llvm::simd_add;
+                unsafe { Simd(simd_add(self.0, other.0)) }
+            }
+        }
+
+        impl crate::ops::Sub for $id {
+            type Output = Self;
+            #[inline]
+            fn sub(self, other: Self) -> Self {
+                use crate::llvm::simd_sub;
+                unsafe { Simd(simd_sub(self.0, other.0)) }
+            }
+        }
+
+        impl crate::ops::Mul for $id {
+            type Output = Self;
+            #[inline]
+            fn mul(self, other: Self) -> Self {
+                use crate::llvm::simd_mul;
+                unsafe { Simd(simd_mul(self.0, other.0)) }
+            }
+        }
+
+        impl crate::ops::Div for $id {
+            type Output = Self;
+            #[inline]
+            fn div(self, other: Self) -> Self {
+                use crate::llvm::simd_div;
+                unsafe { Simd(simd_div(self.0, other.0)) }
+            }
+        }
+
+        impl crate::ops::Rem for $id {
+            type Output = Self;
+            #[inline]
+            fn rem(self, other: Self) -> Self {
+                use crate::llvm::simd_rem;
+                unsafe { Simd(simd_rem(self.0, other.0)) }
+            }
+        }
+
+        impl crate::ops::AddAssign for $id {
+            #[inline]
+            fn add_assign(&mut self, other: Self) {
+                *self = *self + other;
+            }
+        }
+
+        impl crate::ops::SubAssign for $id {
+            #[inline]
+            fn sub_assign(&mut self, other: Self) {
+                *self = *self - other;
+            }
+        }
+
+        impl crate::ops::MulAssign for $id {
+            #[inline]
+            fn mul_assign(&mut self, other: Self) {
+                *self = *self * other;
+            }
+        }
+
+        impl crate::ops::DivAssign for $id {
+            #[inline]
+            fn div_assign(&mut self, other: Self) {
+                *self = *self / other;
+            }
+        }
+
+        impl crate::ops::RemAssign for $id {
+            #[inline]
+            fn rem_assign(&mut self, other: Self) {
+                *self = *self % other;
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _ops_vector_arith>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn ops_vector_arithmetic() {
+                        let z = $id::splat(0 as $elem_ty);
+                        let o = $id::splat(1 as $elem_ty);
+                        let t = $id::splat(2 as $elem_ty);
+                        let f = $id::splat(4 as $elem_ty);
+
+                        // add
+                        assert_eq!(z + z, z);
+                        assert_eq!(o + z, o);
+                        assert_eq!(t + z, t);
+                        assert_eq!(t + t, f);
+                        // sub
+                        assert_eq!(z - z, z);
+                        assert_eq!(o - z, o);
+                        assert_eq!(t - z, t);
+                        assert_eq!(f - t, t);
+                        assert_eq!(f - o - o, t);
+                        // mul
+                        assert_eq!(z * z, z);
+                        assert_eq!(z * o, z);
+                        assert_eq!(z * t, z);
+                        assert_eq!(o * t, t);
+                        assert_eq!(t * t, f);
+                        // div
+                        assert_eq!(z / o, z);
+                        assert_eq!(t / o, t);
+                        assert_eq!(f / o, f);
+                        assert_eq!(t / t, o);
+                        assert_eq!(f / t, t);
+                        // rem
+                        assert_eq!(o % o, z);
+                        assert_eq!(f % t, z);
+
+                        {
+                            let mut v = z;
+                            assert_eq!(v, z);
+                            v += o; // add_assign
+                            assert_eq!(v, o);
+                            v -= o; // sub_assign
+                            assert_eq!(v, z);
+                            v = t;
+                            v *= o; // mul_assign
+                            assert_eq!(v, t);
+                            v *= t;
+                            assert_eq!(v, f);
+                            v /= o; // div_assign
+                            assert_eq!(v, f);
+                            v /= t;
+                            assert_eq!(v, t);
+                            v %= t; // rem_assign
+                            assert_eq!(v, z);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
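+
+// Usage sketch: all ten operators above act lane-wise, e.g. on the crate's
+// `f32x4` type:
+//
+// ```
+// use packed_simd::f32x4;
+// let a = f32x4::new(1., 2., 3., 4.);
+// let b = f32x4::new(4., 3., 2., 1.);
+// assert_eq!(a + b, f32x4::splat(5.));           // Add
+// assert_eq!(a * b, f32x4::new(4., 6., 6., 4.)); // Mul
+// let mut c = a;
+// c -= b;                                        // SubAssign
+// assert_eq!(c, f32x4::new(-3., -1., 1., 3.));
+// ```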
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/ops/vector_bitwise.rs.html b/src/packed_simd/api/ops/vector_bitwise.rs.html new file mode 100644 index 000000000..233f876ba --- /dev/null +++ b/src/packed_simd/api/ops/vector_bitwise.rs.html @@ -0,0 +1,261 @@ +vector_bitwise.rs.html -- source
+//! Vertical (lane-wise) vector-vector bitwise operations.
+
+macro_rules! impl_ops_vector_bitwise {
+    (
+        [$elem_ty:ident; $elem_count:expr]:
+        $id:ident | $test_tt:tt |
+        ($true:expr, $false:expr)
+    ) => {
+        impl crate::ops::Not for $id {
+            type Output = Self;
+            #[inline]
+            fn not(self) -> Self {
+                Self::splat($true) ^ self
+            }
+        }
+        impl crate::ops::BitXor for $id {
+            type Output = Self;
+            #[inline]
+            fn bitxor(self, other: Self) -> Self {
+                use crate::llvm::simd_xor;
+                unsafe { Simd(simd_xor(self.0, other.0)) }
+            }
+        }
+        impl crate::ops::BitAnd for $id {
+            type Output = Self;
+            #[inline]
+            fn bitand(self, other: Self) -> Self {
+                use crate::llvm::simd_and;
+                unsafe { Simd(simd_and(self.0, other.0)) }
+            }
+        }
+        impl crate::ops::BitOr for $id {
+            type Output = Self;
+            #[inline]
+            fn bitor(self, other: Self) -> Self {
+                use crate::llvm::simd_or;
+                unsafe { Simd(simd_or(self.0, other.0)) }
+            }
+        }
+        impl crate::ops::BitAndAssign for $id {
+            #[inline]
+            fn bitand_assign(&mut self, other: Self) {
+                *self = *self & other;
+            }
+        }
+        impl crate::ops::BitOrAssign for $id {
+            #[inline]
+            fn bitor_assign(&mut self, other: Self) {
+                *self = *self | other;
+            }
+        }
+        impl crate::ops::BitXorAssign for $id {
+            #[inline]
+            fn bitxor_assign(&mut self, other: Self) {
+                *self = *self ^ other;
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _ops_vector_bitwise>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn ops_vector_bitwise() {
+
+                        let z = $id::splat(0 as $elem_ty);
+                        let o = $id::splat(1 as $elem_ty);
+                        let t = $id::splat(2 as $elem_ty);
+                        let m = $id::splat(!z.extract(0));
+
+                        // Not:
+                        assert_eq!(!z, m);
+                        assert_eq!(!m, z);
+
+                        // BitAnd:
+                        assert_eq!(o & o, o);
+                        assert_eq!(o & z, z);
+                        assert_eq!(z & o, z);
+                        assert_eq!(z & z, z);
+
+                        assert_eq!(t & t, t);
+                        assert_eq!(t & o, z);
+                        assert_eq!(o & t, z);
+
+                        // BitOr:
+                        assert_eq!(o | o, o);
+                        assert_eq!(o | z, o);
+                        assert_eq!(z | o, o);
+                        assert_eq!(z | z, z);
+
+                        assert_eq!(t | t, t);
+                        assert_eq!(z | t, t);
+                        assert_eq!(t | z, t);
+
+                        // BitXOR:
+                        assert_eq!(o ^ o, z);
+                        assert_eq!(z ^ z, z);
+                        assert_eq!(z ^ o, o);
+                        assert_eq!(o ^ z, o);
+
+                        assert_eq!(t ^ t, z);
+                        assert_eq!(t ^ z, t);
+                        assert_eq!(z ^ t, t);
+
+                        {
+                            // AndAssign:
+                            let mut v = o;
+                            v &= t;
+                            assert_eq!(v, z);
+                        }
+                        {
+                            // OrAssign:
+                            let mut v = z;
+                            v |= o;
+                            assert_eq!(v, o);
+                        }
+                        {
+                            // XORAssign:
+                            let mut v = z;
+                            v ^= o;
+                            assert_eq!(v, o);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
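+
+// Usage sketch, e.g. with the crate's `u32x4` type:
+//
+// ```
+// use packed_simd::u32x4;
+// let x = u32x4::splat(0b1010);
+// let y = u32x4::splat(0b0110);
+// assert_eq!(x & y, u32x4::splat(0b0010));
+// assert_eq!(x | y, u32x4::splat(0b1110));
+// assert_eq!(x ^ y, u32x4::splat(0b1100));
+// assert_eq!(!u32x4::splat(0), u32x4::splat(u32::max_value()));
+// ```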
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/ops/vector_float_min_max.rs.html b/src/packed_simd/api/ops/vector_float_min_max.rs.html new file mode 100644 index 000000000..d3f648a6d --- /dev/null +++ b/src/packed_simd/api/ops/vector_float_min_max.rs.html @@ -0,0 +1,151 @@ +vector_float_min_max.rs.html -- source
+//! Vertical (lane-wise) vector `min` and `max` for floating-point vectors.
+
+macro_rules! impl_ops_vector_float_min_max {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Minimum of two vectors.
+            ///
+            /// Returns a new vector containing the minimum value of each of
+            /// the input vector lanes.
+            #[inline]
+            pub fn min(self, x: Self) -> Self {
+                use crate::llvm::simd_fmin;
+                unsafe { Simd(simd_fmin(self.0, x.0)) }
+            }
+
+            /// Maximum of two vectors.
+            ///
+            /// Returns a new vector containing the maximum value of each of
+            /// the input vector lanes.
+            #[inline]
+            pub fn max(self, x: Self) -> Self {
+                use crate::llvm::simd_fmax;
+                unsafe { Simd(simd_fmax(self.0, x.0)) }
+            }
+        }
+        test_if!{
+            $test_tt:
+            paste::item! {
+                #[cfg(not(any(
+                    // FIXME: https://github.com/rust-lang-nursery/packed_simd/issues/223
+                    all(target_arch = "mips", target_endian = "big"),
+                    target_arch = "mips64",
+                )))]
+                pub mod [<$id _ops_vector_min_max>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn min_max() {
+                        let n = crate::$elem_ty::NAN;
+                        let o = $id::splat(1. as $elem_ty);
+                        let t = $id::splat(2. as $elem_ty);
+
+                        let mut m = o; // becomes [2., 1., 2., 1., ...] below
+                        let mut on = o;
+                        for i in 0..$id::lanes() {
+                            if i % 2 == 0 {
+                                m = m.replace(i, 2. as $elem_ty);
+                                on = on.replace(i, n);
+                            }
+                        }
+
+                        assert_eq!(o.min(t), o);
+                        assert_eq!(t.min(o), o);
+                        assert_eq!(m.min(o), o);
+                        assert_eq!(o.min(m), o);
+                        assert_eq!(m.min(t), m);
+                        assert_eq!(t.min(m), m);
+
+                        assert_eq!(o.max(t), t);
+                        assert_eq!(t.max(o), t);
+                        assert_eq!(m.max(o), m);
+                        assert_eq!(o.max(m), m);
+                        assert_eq!(m.max(t), t);
+                        assert_eq!(t.max(m), t);
+
+                        assert_eq!(on.min(o), o);
+                        assert_eq!(o.min(on), o);
+                        assert_eq!(on.max(o), o);
+                        assert_eq!(o.max(on), o);
+                    }
+                }
+            }
+        }
+    };
+}
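+
+// Usage sketch: lane-wise `min`/`max` on the crate's `f32x4`; as the test
+// above checks, a `NaN` lane yields the non-`NaN` input for that lane:
+//
+// ```
+// use packed_simd::f32x4;
+// let a = f32x4::new(1., 5., 3., std::f32::NAN);
+// let b = f32x4::splat(2.);
+// assert_eq!(a.min(b), f32x4::new(1., 2., 2., 2.));
+// assert_eq!(a.max(b), f32x4::new(2., 5., 3., 2.));
+// ```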
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/ops/vector_int_min_max.rs.html b/src/packed_simd/api/ops/vector_int_min_max.rs.html new file mode 100644 index 000000000..e4b046a12 --- /dev/null +++ b/src/packed_simd/api/ops/vector_int_min_max.rs.html @@ -0,0 +1,117 @@ +vector_int_min_max.rs.html -- source
+//! Vertical (lane-wise) vector `min` and `max` for integer vectors.
+
+macro_rules! impl_ops_vector_int_min_max {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Minimum of two vectors.
+            ///
+            /// Returns a new vector containing the minimum value of each of
+            /// the input vector lanes.
+            #[inline]
+            pub fn min(self, x: Self) -> Self {
+                self.lt(x).select(self, x)
+            }
+
+            /// Maximum of two vectors.
+            ///
+            /// Returns a new vector containing the maximum value of each of
+            /// the input vector lanes.
+            #[inline]
+            pub fn max(self, x: Self) -> Self {
+                self.gt(x).select(self, x)
+            }
+        }
+        test_if!{$test_tt:
+        paste::item! {
+            pub mod [<$id _ops_vector_min_max>] {
+                use super::*;
+                #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                fn min_max() {
+                    let o = $id::splat(1 as $elem_ty);
+                    let t = $id::splat(2 as $elem_ty);
+
+                    let mut m = o;
+                    for i in 0..$id::lanes() {
+                        if i % 2 == 0 {
+                            m = m.replace(i, 2 as $elem_ty);
+                        }
+                    }
+                    assert_eq!(o.min(t), o);
+                    assert_eq!(t.min(o), o);
+                    assert_eq!(m.min(o), o);
+                    assert_eq!(o.min(m), o);
+                    assert_eq!(m.min(t), m);
+                    assert_eq!(t.min(m), m);
+
+                    assert_eq!(o.max(t), t);
+                    assert_eq!(t.max(o), t);
+                    assert_eq!(m.max(o), m);
+                    assert_eq!(o.max(m), m);
+                    assert_eq!(m.max(t), t);
+                    assert_eq!(t.max(m), t);
+                }
+            }
+        }
+        }
+    };
+}
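+
+// Usage sketch: lane-wise `min`/`max` on the crate's `i32x4`:
+//
+// ```
+// use packed_simd::i32x4;
+// let a = i32x4::new(-1, 7, 3, 0);
+// let b = i32x4::splat(2);
+// assert_eq!(a.min(b), i32x4::new(-1, 2, 2, 0));
+// assert_eq!(a.max(b), i32x4::new(2, 7, 3, 2));
+// ```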
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/ops/vector_mask_bitwise.rs.html b/src/packed_simd/api/ops/vector_mask_bitwise.rs.html new file mode 100644 index 000000000..8c6bb3dd7 --- /dev/null +++ b/src/packed_simd/api/ops/vector_mask_bitwise.rs.html @@ -0,0 +1,235 @@ +vector_mask_bitwise.rs.html -- source
+//! Vertical (lane-wise) mask vector-vector bitwise operations.
+
+macro_rules! impl_ops_vector_mask_bitwise {
+    (
+        [$elem_ty:ident; $elem_count:expr]:
+        $id:ident | $test_tt:tt |
+        ($true:expr, $false:expr)
+    ) => {
+        impl crate::ops::Not for $id {
+            type Output = Self;
+            #[inline]
+            fn not(self) -> Self {
+                Self::splat($true) ^ self
+            }
+        }
+        impl crate::ops::BitXor for $id {
+            type Output = Self;
+            #[inline]
+            fn bitxor(self, other: Self) -> Self {
+                use crate::llvm::simd_xor;
+                unsafe { Simd(simd_xor(self.0, other.0)) }
+            }
+        }
+        impl crate::ops::BitAnd for $id {
+            type Output = Self;
+            #[inline]
+            fn bitand(self, other: Self) -> Self {
+                use crate::llvm::simd_and;
+                unsafe { Simd(simd_and(self.0, other.0)) }
+            }
+        }
+        impl crate::ops::BitOr for $id {
+            type Output = Self;
+            #[inline]
+            fn bitor(self, other: Self) -> Self {
+                use crate::llvm::simd_or;
+                unsafe { Simd(simd_or(self.0, other.0)) }
+            }
+        }
+        impl crate::ops::BitAndAssign for $id {
+            #[inline]
+            fn bitand_assign(&mut self, other: Self) {
+                *self = *self & other;
+            }
+        }
+        impl crate::ops::BitOrAssign for $id {
+            #[inline]
+            fn bitor_assign(&mut self, other: Self) {
+                *self = *self | other;
+            }
+        }
+        impl crate::ops::BitXorAssign for $id {
+            #[inline]
+            fn bitxor_assign(&mut self, other: Self) {
+                *self = *self ^ other;
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _ops_vector_mask_bitwise>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn ops_vector_mask_bitwise() {
+                        let t = $id::splat(true);
+                        let f = $id::splat(false);
+                        assert!(t != f);
+                        assert!(!(t == f));
+
+                        // Not:
+                        assert_eq!(!t, f);
+                        assert_eq!(t, !f);
+
+                        // BitAnd:
+                        assert_eq!(t & f, f);
+                        assert_eq!(f & t, f);
+                        assert_eq!(t & t, t);
+                        assert_eq!(f & f, f);
+
+                        // BitOr:
+                        assert_eq!(t | f, t);
+                        assert_eq!(f | t, t);
+                        assert_eq!(t | t, t);
+                        assert_eq!(f | f, f);
+
+                        // BitXOR:
+                        assert_eq!(t ^ f, t);
+                        assert_eq!(f ^ t, t);
+                        assert_eq!(t ^ t, f);
+                        assert_eq!(f ^ f, f);
+
+                        {
+                            // AndAssign:
+                            let mut v = f;
+                            v &= t;
+                            assert_eq!(v, f);
+                        }
+                        {
+                            // OrAssign:
+                            let mut v = f;
+                            v |= t;
+                            assert_eq!(v, t);
+                        }
+                        {
+                            // XORAssign:
+                            let mut v = f;
+                            v ^= t;
+                            assert_eq!(v, t);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
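+
+// Usage sketch, e.g. with the crate's `m32x4` mask type:
+//
+// ```
+// use packed_simd::m32x4;
+// let m = m32x4::new(true, false, true, false);
+// assert_eq!(m & !m, m32x4::splat(false));
+// assert_eq!(m | !m, m32x4::splat(true));
+// assert_eq!(m ^ m,  m32x4::splat(false));
+// ```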
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/ops/vector_neg.rs.html b/src/packed_simd/api/ops/vector_neg.rs.html new file mode 100644 index 000000000..54a42ecb8 --- /dev/null +++ b/src/packed_simd/api/ops/vector_neg.rs.html @@ -0,0 +1,89 @@ +vector_neg.rs.html -- source
+//! Vertical (lane-wise) vector `Neg`.
+
+macro_rules! impl_ops_vector_neg {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl crate::ops::Neg for $id {
+            type Output = Self;
+            #[inline]
+            fn neg(self) -> Self {
+                Self::splat(-1 as $elem_ty) * self
+            }
+        }
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _ops_vector_neg>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn neg() {
+                        let z = $id::splat(0 as $elem_ty);
+                        let o = $id::splat(1 as $elem_ty);
+                        let t = $id::splat(2 as $elem_ty);
+                        let f = $id::splat(4 as $elem_ty);
+
+                        let nz = $id::splat(-(0 as $elem_ty));
+                        let no = $id::splat(-(1 as $elem_ty));
+                        let nt = $id::splat(-(2 as $elem_ty));
+                        let nf = $id::splat(-(4 as $elem_ty));
+
+                        assert_eq!(-z, nz);
+                        assert_eq!(-o, no);
+                        assert_eq!(-t, nt);
+                        assert_eq!(-f, nf);
+
+                        assert_eq!(z, -nz);
+                        assert_eq!(o, -no);
+                        assert_eq!(t, -nt);
+                        assert_eq!(f, -nf);
+                    }
+                }
+            }
+        }
+    };
+}
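+
+// Usage sketch, e.g. with the crate's `i32x4` type:
+//
+// ```
+// use packed_simd::i32x4;
+// let v = i32x4::new(1, -2, 3, -4);
+// assert_eq!(-v, i32x4::new(-1, 2, -3, 4));
+// ```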
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/ops/vector_rotates.rs.html b/src/packed_simd/api/ops/vector_rotates.rs.html new file mode 100644 index 000000000..192104591 --- /dev/null +++ b/src/packed_simd/api/ops/vector_rotates.rs.html @@ -0,0 +1,183 @@ +vector_rotates.rs.html -- source
+//! Vertical (lane-wise) vector rotate operations.
+#![allow(unused)]
+
+macro_rules! impl_ops_vector_rotates {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Shifts the bits of each lane to the left by the specified
+            /// amount in the corresponding lane of `n`, wrapping the
+            /// truncated bits to the end of the resulting integer.
+            ///
+            /// Note: this is neither the same operation as `<<` nor equivalent
+            /// to `slice::rotate_left`.
+            #[inline]
+            pub fn rotate_left(self, n: $id) -> $id {
+                const LANE_WIDTH: $elem_ty =
+                    crate::mem::size_of::<$elem_ty>() as $elem_ty * 8;
+                // Protect against undefined behavior for over-long bit shifts
+                let n = n % LANE_WIDTH;
+                (self << n) | (self >> ((LANE_WIDTH - n) % LANE_WIDTH))
+            }
+
+            /// Shifts the bits of each lane to the right by the specified
+            /// amount in the corresponding lane of `n`, wrapping the
+            /// truncated bits to the beginning of the resulting integer.
+            ///
+            /// Note: this is neither the same operation as `>>` nor equivalent
+            /// to `slice::rotate_right`.
+            #[inline]
+            pub fn rotate_right(self, n: $id) -> $id {
+                const LANE_WIDTH: $elem_ty =
+                    crate::mem::size_of::<$elem_ty>() as $elem_ty * 8;
+                // Protect against undefined behavior for over-long bit shifts
+                let n = n % LANE_WIDTH;
+                (self >> n) | (self << ((LANE_WIDTH - n) % LANE_WIDTH))
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                // FIXME:
+                // https://github.com/rust-lang-nursery/packed_simd/issues/75
+                #[cfg(not(any(
+                    target_arch = "s390x",
+                    target_arch = "sparc64",
+                )))]
+                pub mod [<$id _ops_vector_rotate>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn rotate_ops() {
+                        let z = $id::splat(0 as $elem_ty);
+                        let o = $id::splat(1 as $elem_ty);
+                        let t = $id::splat(2 as $elem_ty);
+                        let f = $id::splat(4 as $elem_ty);
+
+                        let max = $id::splat(
+                            (mem::size_of::<$elem_ty>() * 8 - 1) as $elem_ty);
+
+                        // rotate_right
+                        assert_eq!(z.rotate_right(z), z);
+                        assert_eq!(z.rotate_right(o), z);
+                        assert_eq!(z.rotate_right(t), z);
+
+                        assert_eq!(o.rotate_right(z), o);
+                        assert_eq!(t.rotate_right(z), t);
+                        assert_eq!(f.rotate_right(z), f);
+                        assert_eq!(f.rotate_right(max), f << 1);
+
+                        assert_eq!(o.rotate_right(o), o << max);
+                        assert_eq!(t.rotate_right(o), o);
+                        assert_eq!(t.rotate_right(t), o << max);
+                        assert_eq!(f.rotate_right(o), t);
+                        assert_eq!(f.rotate_right(t), o);
+
+                        // rotate_left
+                        assert_eq!(z.rotate_left(z), z);
+                        assert_eq!(o.rotate_left(z), o);
+                        assert_eq!(t.rotate_left(z), t);
+                        assert_eq!(f.rotate_left(z), f);
+                        assert_eq!(f.rotate_left(max), t);
+
+                        assert_eq!(o.rotate_left(o), t);
+                        assert_eq!(o.rotate_left(t), f);
+                        assert_eq!(t.rotate_left(o), f);
+                    }
+                }
+            }
+        }
+    };
+}
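+
+// Usage sketch: unlike `<<`/`>>`, the bits shifted out of a lane wrap around
+// to its other end, and each lane of `n` supplies its own rotate amount:
+//
+// ```
+// use packed_simd::u32x4;
+// let x = u32x4::splat(0x8000_0001);
+// assert_eq!(x.rotate_left(u32x4::new(0, 1, 4, 31)),
+//            u32x4::new(0x8000_0001, 0x0000_0003, 0x0000_0018, 0xC000_0000));
+// ```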
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/ops/vector_shifts.rs.html b/src/packed_simd/api/ops/vector_shifts.rs.html new file mode 100644 index 000000000..b98e6f209 --- /dev/null +++ b/src/packed_simd/api/ops/vector_shifts.rs.html @@ -0,0 +1,217 @@ +vector_shifts.rs.html -- source
+//! Vertical (lane-wise) vector-vector shift operations.
+
+macro_rules! impl_ops_vector_shifts {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl crate::ops::Shl<$id> for $id {
+            type Output = Self;
+            #[inline]
+            fn shl(self, other: Self) -> Self {
+                use crate::llvm::simd_shl;
+                unsafe { Simd(simd_shl(self.0, other.0)) }
+            }
+        }
+        impl crate::ops::Shr<$id> for $id {
+            type Output = Self;
+            #[inline]
+            fn shr(self, other: Self) -> Self {
+                use crate::llvm::simd_shr;
+                unsafe { Simd(simd_shr(self.0, other.0)) }
+            }
+        }
+        impl crate::ops::ShlAssign<$id> for $id {
+            #[inline]
+            fn shl_assign(&mut self, other: Self) {
+                *self = *self << other;
+            }
+        }
+        impl crate::ops::ShrAssign<$id> for $id {
+            #[inline]
+            fn shr_assign(&mut self, other: Self) {
+                *self = *self >> other;
+            }
+        }
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _ops_vector_shifts>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg_attr(any(target_arch = "s390x", target_arch = "sparc64"),
+                               allow(unreachable_code,
+                                     unused_variables,
+                                     unused_mut)
+                    )]
+                    // ^^^ FIXME: https://github.com/rust-lang/rust/issues/55344
+                    fn ops_vector_shifts() {
+                        let z = $id::splat(0 as $elem_ty);
+                        let o = $id::splat(1 as $elem_ty);
+                        let t = $id::splat(2 as $elem_ty);
+                        let f = $id::splat(4 as $elem_ty);
+
+                        let max = $id::splat(
+                            (mem::size_of::<$elem_ty>() * 8 - 1) as $elem_ty
+                        );
+
+                        // shr
+                        assert_eq!(z >> z, z);
+                        assert_eq!(z >> o, z);
+                        assert_eq!(z >> t, z);
+                        assert_eq!(z >> max, z);
+
+                        #[cfg(any(target_arch = "s390x", target_arch = "sparc64"))] {
+                            // FIXME: rust produces bad codegen for shifts:
+                            // https://github.com/rust-lang-nursery/packed_simd/issues/13
+                            return;
+                        }
+
+                        assert_eq!(o >> z, o);
+                        assert_eq!(t >> z, t);
+                        assert_eq!(f >> z, f);
+                        assert_eq!(f >> max, z);
+
+                        assert_eq!(o >> o, z);
+                        assert_eq!(t >> o, o);
+                        assert_eq!(t >> t, z);
+                        assert_eq!(f >> o, t);
+                        assert_eq!(f >> t, o);
+                        assert_eq!(f >> max, z);
+
+                        // shl
+                        assert_eq!(z << z, z);
+                        assert_eq!(o << z, o);
+                        assert_eq!(t << z, t);
+                        assert_eq!(f << z, f);
+                        assert_eq!(f << max, z);
+
+                        assert_eq!(o << o, t);
+                        assert_eq!(o << t, f);
+                        assert_eq!(t << o, f);
+
+                        {
+                            // shr_assign
+                            let mut v = o;
+                            v >>= o;
+                            assert_eq!(v, z);
+                        }
+                        {
+                            // shl_assign
+                            let mut v = o;
+                            v <<= o;
+                            assert_eq!(v, t);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
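+
+// Usage sketch: each lane is shifted by the corresponding lane of the
+// right-hand side, e.g. with the crate's `u32x4`:
+//
+// ```
+// use packed_simd::u32x4;
+// let n = u32x4::new(0, 1, 2, 3);
+// assert_eq!(u32x4::splat(1) << n, u32x4::new(1, 2, 4, 8));
+// assert_eq!(u32x4::splat(8) >> n, u32x4::new(8, 4, 2, 1));
+// ```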
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/ptr.rs.html b/src/packed_simd/api/ptr.rs.html new file mode 100644 index 000000000..139cfda76 --- /dev/null +++ b/src/packed_simd/api/ptr.rs.html @@ -0,0 +1,11 @@ +ptr.rs.html -- source
+//! Vector of pointers
+
+#[macro_use]
+mod gather_scatter;
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/ptr/gather_scatter.rs.html b/src/packed_simd/api/ptr/gather_scatter.rs.html new file mode 100644 index 000000000..6d08da6b0 --- /dev/null +++ b/src/packed_simd/api/ptr/gather_scatter.rs.html @@ -0,0 +1,437 @@ +gather_scatter.rs.html -- source
+//! Implements masked gathers and scatters for vectors of pointers.
+
+macro_rules! impl_ptr_read {
+    ([$elem_ty:ty; $elem_count:expr]: $id:ident, $mask_ty:ident
+     | $test_tt:tt) => {
+        impl<T> $id<T>
+        where
+            [T; $elem_count]: sealed::SimdArray,
+        {
+            /// Reads selected vector elements from memory.
+            ///
+            /// Instantiates a new vector by reading the values pointed to by
+            /// `self` for those lanes whose `mask` is `true`, and using the
+            /// elements of `value` otherwise.
+            ///
+            /// No memory is accessed for those lanes of `self` whose `mask` is
+            /// `false`.
+            ///
+            /// # Safety
+            ///
+            /// This method is unsafe because it dereferences raw pointers. The
+            /// pointers must be aligned to `mem::align_of::<T>()`.
+            #[inline]
+            pub unsafe fn read<M>(
+                self, mask: Simd<[M; $elem_count]>,
+                value: Simd<[T; $elem_count]>,
+            ) -> Simd<[T; $elem_count]>
+            where
+                M: sealed::Mask,
+                [M; $elem_count]: sealed::SimdArray,
+            {
+                use crate::llvm::simd_gather;
+                Simd(simd_gather(value.0, self.0, mask.0))
+            }
+        }
+
+        test_if! {
+            $test_tt:
+            paste::item! {
+                mod [<$id _read>] {
+                    use super::*;
+                    #[test]
+                    fn read() {
+                        let mut v = [0_i32; $elem_count];
+                        for i in 0..$elem_count {
+                            v[i] = i as i32;
+                        }
+
+                        let mut ptr = $id::<i32>::null();
+
+                        for i in 0..$elem_count {
+                            ptr = ptr.replace(i, unsafe {
+                                crate::mem::transmute(&v[i] as *const i32)
+                            });
+                        }
+
+                        // all mask elements are true:
+                        let mask = $mask_ty::splat(true);
+                        let def = Simd::<[i32; $elem_count]>::splat(42_i32);
+                        let r: Simd<[i32; $elem_count]> = unsafe {
+                            ptr.read(mask, def)
+                        };
+                        assert_eq!(
+                            r,
+                            Simd::<[i32; $elem_count]>::from_slice_unaligned(
+                                &v
+                            )
+                        );
+
+                        let mut mask = mask;
+                        for i in 0..$elem_count {
+                            if i % 2 != 0 {
+                                mask = mask.replace(i, false);
+                            }
+                        }
+
+                        // even mask elements are true, odd ones are false:
+                        let r: Simd<[i32; $elem_count]> = unsafe {
+                            ptr.read(mask, def)
+                        };
+                        let mut e = v;
+                        for i in 0..$elem_count {
+                            if i % 2 != 0 {
+                                e[i] = 42;
+                            }
+                        }
+                        assert_eq!(
+                            r,
+                            Simd::<[i32; $elem_count]>::from_slice_unaligned(
+                                &e
+                            )
+                        );
+
+                        // all mask elements are false:
+                        let mask = $mask_ty::splat(false);
+                        let def = Simd::<[i32; $elem_count]>::splat(42_i32);
+                        let r: Simd<[i32; $elem_count]> = unsafe {
+                            ptr.read(mask, def)
+                        };
+                        assert_eq!(r, def);
+                    }
+                }
+            }
+        }
+    };
+}
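+
+// A usage sketch for masked gather (assuming the crate's `cptrx4` const
+// pointer vector and the pointer-sized mask type `msizex4`):
+//
+// ```
+// use packed_simd::{cptrx4, i32x4, msizex4};
+// let v = [10_i32, 11, 12, 13];
+// let mut p = cptrx4::<i32>::null();
+// for i in 0..4 {
+//     p = p.replace(i, &v[i] as *const i32);
+// }
+// let mask = msizex4::new(true, false, true, false);
+// let def = i32x4::splat(-1);
+// // lanes with a `false` mask never touch memory and keep `def`'s value:
+// let r = unsafe { p.read(mask, def) };
+// assert_eq!(r, i32x4::new(10, -1, 12, -1));
+// ```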
+
+macro_rules! impl_ptr_write {
+    ([$elem_ty:ty; $elem_count:expr]: $id:ident, $mask_ty:ident
+     | $test_tt:tt) => {
+        impl<T> $id<T>
+        where
+            [T; $elem_count]: sealed::SimdArray,
+        {
+            /// Writes selected vector elements to memory.
+            ///
+            /// Writes the lanes of `value` for which the mask is `true` to
+            /// their corresponding memory addresses in `self`.
+            ///
+            /// No memory is accessed for those lanes of `self` whose `mask` is
+            /// `false`.
+            ///
+            /// Overlapping memory addresses of `self` are written to in order
+            /// from the least-significant to the most-significant element.
+            ///
+            /// # Safety
+            ///
+            /// This method is unsafe because it dereferences raw pointers. The
+            /// pointers must be aligned to `mem::align_of::<T>()`.
+            #[inline]
+            pub unsafe fn write<M>(
+                self, mask: Simd<[M; $elem_count]>,
+                value: Simd<[T; $elem_count]>,
+            ) where
+                M: sealed::Mask,
+                [M; $elem_count]: sealed::SimdArray,
+            {
+                use crate::llvm::simd_scatter;
+                simd_scatter(value.0, self.0, mask.0)
+            }
+        }
+
+        test_if! {
+            $test_tt:
+            paste::item! {
+                mod [<$id _write>] {
+                    use super::*;
+                    #[test]
+                    fn write() {
+                        // forty_two = [42, 42, 42, ...]
+                        let forty_two = Simd::<[i32; $elem_count]>::splat(42_i32);
+
+                        // This test will write to this array
+                        let mut arr = [0_i32; $elem_count];
+                        for i in 0..$elem_count {
+                            arr[i] = i as i32;
+                        }
+                        // arr = [0, 1, 2, ...]
+
+                        let mut ptr = $id::<i32>::null();
+                        for i in 0..$elem_count {
+                            ptr = ptr.replace(i, unsafe {
+                                crate::mem::transmute(arr.as_ptr().add(i))
+                            });
+                        }
+                        // ptr = [&arr[0], &arr[1], ...]
+
+                        // write `forty_two` to all elements of `arr`
+                        {
+                            let backup = arr;
+                            unsafe {
+                                ptr.write($mask_ty::splat(true), forty_two)
+                            };
+                            assert_eq!(arr, [42_i32; $elem_count]);
+                            arr = backup;  // arr = [0, 1, 2, ...]
+                        }
+
+                        // write 42 to even elements of arr:
+                        {
+                            // set odd elements of the mask to false
+                            let mut mask = $mask_ty::splat(true);
+                            for i in 0..$elem_count {
+                                if i % 2 != 0 {
+                                    mask = mask.replace(i, false);
+                                }
+                            }
+                            // mask = [true, false, true, false, ...]
+
+                            // expected result r = [42, 1, 42, 3, 42, 5, ...]
+                            let mut r = arr;
+                            for i in 0..$elem_count {
+                                if i % 2 == 0 {
+                                    r[i] = 42;
+                                }
+                            }
+
+                            let backup = arr;
+                            unsafe { ptr.write(mask, forty_two) };
+                            assert_eq!(arr, r);
+                            arr = backup;  // arr = [0, 1, 2, 3, ...]
+                        }
+
+                        // write 42 to no elements of arr
+                        {
+                            let backup = arr;
+                            unsafe {
+                                ptr.write($mask_ty::splat(false), forty_two)
+                            };
+                            assert_eq!(arr, backup);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
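+
+// A usage sketch for masked scatter (assuming the crate's `mptrx4` mutable
+// pointer vector and the pointer-sized mask type `msizex4`):
+//
+// ```
+// use packed_simd::{i32x4, mptrx4, msizex4};
+// let mut arr = [0_i32, 1, 2, 3];
+// let mut p = mptrx4::<i32>::null();
+// for i in 0..4 {
+//     p = p.replace(i, &mut arr[i] as *mut i32);
+// }
+// // only the lanes with a `true` mask are written to memory:
+// let mask = msizex4::new(true, false, true, false);
+// unsafe { p.write(mask, i32x4::splat(42)) };
+// assert_eq!(arr, [42, 1, 42, 3]);
+// ```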
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/reductions.rs.html b/src/packed_simd/api/reductions.rs.html new file mode 100644 index 000000000..a325f4e3a --- /dev/null +++ b/src/packed_simd/api/reductions.rs.html @@ -0,0 +1,27 @@ +reductions.rs.html -- source
+//! Reductions
+
+#[macro_use]
+mod float_arithmetic;
+#[macro_use]
+mod integer_arithmetic;
+#[macro_use]
+mod bitwise;
+#[macro_use]
+mod mask;
+#[macro_use]
+mod min_max;
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/reductions/bitwise.rs.html b/src/packed_simd/api/reductions/bitwise.rs.html new file mode 100644 index 000000000..573377fa9 --- /dev/null +++ b/src/packed_simd/api/reductions/bitwise.rs.html @@ -0,0 +1,305 @@ +bitwise.rs.html -- source
+//! Implements portable horizontal bitwise vector reductions.
+#![allow(unused)]
+
+macro_rules! impl_reduction_bitwise {
+    (
+        [$elem_ty:ident; $elem_count:expr]:
+        $id:ident | $ielem_ty:ident | $test_tt:tt |
+        ($convert:expr) |
+        ($true:expr, $false:expr)
+    ) => {
+        impl $id {
+            /// Horizontal bitwise `and` of the vector elements.
+            ///
+            /// Note: if the vector has one lane, the first element of the
+            /// vector is returned.
+            #[inline]
+            pub fn and(self) -> $elem_ty {
+                #[cfg(not(target_arch = "aarch64"))]
+                {
+                    use crate::llvm::simd_reduce_and;
+                    let r: $ielem_ty = unsafe { simd_reduce_and(self.0) };
+                    $convert(r)
+                }
+                #[cfg(target_arch = "aarch64")]
+                {
+                    // FIXME: broken on aarch64
+                    // https://github.com/rust-lang-nursery/packed_simd/issues/15
+                    let mut x = self.extract(0) as $elem_ty;
+                    for i in 1..$id::lanes() {
+                        x &= self.extract(i) as $elem_ty;
+                    }
+                    x
+                }
+            }
+
+            /// Horizontal bitwise `or` of the vector elements.
+            ///
+            /// Note: if the vector has one lane, the first element of the
+            /// vector is returned.
+            #[inline]
+            pub fn or(self) -> $elem_ty {
+                #[cfg(not(target_arch = "aarch64"))]
+                {
+                    use crate::llvm::simd_reduce_or;
+                    let r: $ielem_ty = unsafe { simd_reduce_or(self.0) };
+                    $convert(r)
+                }
+                #[cfg(target_arch = "aarch64")]
+                {
+                    // FIXME: broken on aarch64
+                    // https://github.com/rust-lang-nursery/packed_simd/issues/15
+                    let mut x = self.extract(0) as $elem_ty;
+                    for i in 1..$id::lanes() {
+                        x |= self.extract(i) as $elem_ty;
+                    }
+                    x
+                }
+            }
+
+            /// Horizontal bitwise `xor` of the vector elements.
+            ///
+            /// Note: if the vector has one lane, the first element of the
+            /// vector is returned.
+            #[inline]
+            pub fn xor(self) -> $elem_ty {
+                #[cfg(not(target_arch = "aarch64"))]
+                {
+                    use crate::llvm::simd_reduce_xor;
+                    let r: $ielem_ty = unsafe { simd_reduce_xor(self.0) };
+                    $convert(r)
+                }
+                #[cfg(target_arch = "aarch64")]
+                {
+                    // FIXME: broken on aarch64
+                    // https://github.com/rust-lang-nursery/packed_simd/issues/15
+                    let mut x = self.extract(0) as $elem_ty;
+                    for i in 1..$id::lanes() {
+                        x ^= self.extract(i) as $elem_ty;
+                    }
+                    x
+                }
+            }
+        }
+
+        test_if!{
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _reduction_bitwise>] {
+                    use super::*;
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn and() {
+                        let v = $id::splat($false);
+                        assert_eq!(v.and(), $false);
+                        let v = $id::splat($true);
+                        assert_eq!(v.and(), $true);
+                        let v = $id::splat($false);
+                        let v = v.replace(0, $true);
+                        if $id::lanes() > 1 {
+                            assert_eq!(v.and(), $false);
+                        } else {
+                            assert_eq!(v.and(), $true);
+                        }
+                        let v = $id::splat($true);
+                        let v = v.replace(0, $false);
+                        assert_eq!(v.and(), $false);
+
+                    }
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn or() {
+                        let v = $id::splat($false);
+                        assert_eq!(v.or(), $false);
+                        let v = $id::splat($true);
+                        assert_eq!(v.or(), $true);
+                        let v = $id::splat($false);
+                        let v = v.replace(0, $true);
+                        assert_eq!(v.or(), $true);
+                        let v = $id::splat($true);
+                        let v = v.replace(0, $false);
+                        if $id::lanes() > 1 {
+                            assert_eq!(v.or(), $true);
+                        } else {
+                            assert_eq!(v.or(), $false);
+                        }
+                    }
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn xor() {
+                        let v = $id::splat($false);
+                        assert_eq!(v.xor(), $false);
+                        let v = $id::splat($true);
+                        if $id::lanes() > 1 {
+                            assert_eq!(v.xor(), $false);
+                        } else {
+                            assert_eq!(v.xor(), $true);
+                        }
+                        let v = $id::splat($false);
+                        let v = v.replace(0, $true);
+                        assert_eq!(v.xor(), $true);
+                        let v = $id::splat($true);
+                        let v = v.replace(0, $false);
+                        if $id::lanes() > 1 {
+                            assert_eq!(v.xor(), $true);
+                        } else {
+                            assert_eq!(v.xor(), $false);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
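+
+// Usage sketch: each reduction folds all lanes into a single scalar, e.g.
+// with the crate's `u32x4`:
+//
+// ```
+// use packed_simd::u32x4;
+// let v = u32x4::new(0b0011, 0b0110, 0b1100, 0b1001);
+// assert_eq!(v.and(), 0b0000);
+// assert_eq!(v.or(),  0b1111);
+// assert_eq!(v.xor(), 0b0000);
+// ```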
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/reductions/float_arithmetic.rs.html b/src/packed_simd/api/reductions/float_arithmetic.rs.html new file mode 100644 index 000000000..11c43fc9e --- /dev/null +++ b/src/packed_simd/api/reductions/float_arithmetic.rs.html @@ -0,0 +1,627 @@ +float_arithmetic.rs.html -- source
+//! Implements portable horizontal float vector arithmetic reductions.
+
+macro_rules! impl_reduction_float_arithmetic {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Horizontal sum of the vector elements.
+            ///
+            /// The intrinsic performs a tree-reduction of the vector elements.
+            /// That is, for an 8-element vector:
+            ///
+            /// > ((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))
+            ///
+            /// If one of the vector elements is `NaN`, the reduction returns
+            /// `NaN`. The resulting `NaN` is not required to be equal to any
+            /// of the `NaN`s in the vector.
+            #[inline]
+            pub fn sum(self) -> $elem_ty {
+                #[cfg(not(target_arch = "aarch64"))]
+                {
+                    use crate::llvm::simd_reduce_add_ordered;
+                    unsafe { simd_reduce_add_ordered(self.0, 0 as $elem_ty) }
+                }
+                #[cfg(target_arch = "aarch64")]
+                {
+                    // FIXME: broken on AArch64
+                    // https://github.com/rust-lang-nursery/packed_simd/issues/15
+                    let mut x = self.extract(0) as $elem_ty;
+                    for i in 1..$id::lanes() {
+                        x += self.extract(i) as $elem_ty;
+                    }
+                    x
+                }
+            }
+
+            /// Horizontal product of the vector elements.
+            ///
+            /// The intrinsic performs a tree-reduction of the vector elements.
+            /// That is, for an 8-element vector:
+            ///
+            /// > ((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))
+            ///
+            /// If one of the vector elements is `NaN`, the reduction returns
+            /// `NaN`. The resulting `NaN` is not required to be equal to any
+            /// of the `NaN`s in the vector.
+            #[inline]
+            pub fn product(self) -> $elem_ty {
+                #[cfg(not(target_arch = "aarch64"))]
+                {
+                    use crate::llvm::simd_reduce_mul_ordered;
+                    unsafe { simd_reduce_mul_ordered(self.0, 1 as $elem_ty) }
+                }
+                #[cfg(target_arch = "aarch64")]
+                {
+                    // FIXME: broken on AArch64
+                    // https://github.com/rust-lang-nursery/packed_simd/issues/15
+                    let mut x = self.extract(0) as $elem_ty;
+                    for i in 1..$id::lanes() {
+                        x *= self.extract(i) as $elem_ty;
+                    }
+                    x
+                }
+            }
+        }
+
+        impl crate::iter::Sum for $id {
+            #[inline]
+            fn sum<I: Iterator<Item = $id>>(iter: I) -> $id {
+                iter.fold($id::splat(0.), crate::ops::Add::add)
+            }
+        }
+
+        impl crate::iter::Product for $id {
+            #[inline]
+            fn product<I: Iterator<Item = $id>>(iter: I) -> $id {
+                iter.fold($id::splat(1.), crate::ops::Mul::mul)
+            }
+        }
+
+        impl<'a> crate::iter::Sum<&'a $id> for $id {
+            #[inline]
+            fn sum<I: Iterator<Item = &'a $id>>(iter: I) -> $id {
+                iter.fold($id::splat(0.), |a, b| crate::ops::Add::add(a, *b))
+            }
+        }
+
+        impl<'a> crate::iter::Product<&'a $id> for $id {
+            #[inline]
+            fn product<I: Iterator<Item = &'a $id>>(iter: I) -> $id {
+                iter.fold($id::splat(1.), |a, b| crate::ops::Mul::mul(a, *b))
+            }
+        }
+
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _reduction_float_arith>] {
+                    use super::*;
+                    fn alternating(x: usize) -> $id {
+                        let mut v = $id::splat(1 as $elem_ty);
+                        for i in 0..$id::lanes() {
+                            if i % x == 0 {
+                                v = v.replace(i, 2 as $elem_ty);
+                            }
+                        }
+                        v
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn sum() {
+                        let v = $id::splat(0 as $elem_ty);
+                        assert_eq!(v.sum(), 0 as $elem_ty);
+                        let v = $id::splat(1 as $elem_ty);
+                        assert_eq!(v.sum(), $id::lanes() as $elem_ty);
+                        let v = alternating(2);
+                        assert_eq!(
+                            v.sum(),
+                            ($id::lanes() / 2 + $id::lanes()) as $elem_ty
+                        );
+                    }
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn product() {
+                        let v = $id::splat(0 as $elem_ty);
+                        assert_eq!(v.product(), 0 as $elem_ty);
+                        let v = $id::splat(1 as $elem_ty);
+                        assert_eq!(v.product(), 1 as $elem_ty);
+                        let f = match $id::lanes() {
+                            64 => 16,
+                            32 => 8,
+                            16 => 4,
+                            _ => 2,
+                        };
+                        let v = alternating(f);
+                        assert_eq!(
+                            v.product(),
+                            (2_usize.pow(($id::lanes() / f) as u32)
+                             as $elem_ty)
+                        );
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[allow(unreachable_code)]
+                    #[allow(unused_mut)]
+                    // ^^^ FIXME: https://github.com/rust-lang/rust/issues/55344
+                    fn sum_nan() {
+                        // FIXME: https://bugs.llvm.org/show_bug.cgi?id=36732
+                        // https://github.com/rust-lang-nursery/packed_simd/issues/6
+                        return;
+
+                        let n0 = crate::$elem_ty::NAN;
+                        let v0 = $id::splat(-3.0);
+                        for i in 0..$id::lanes() {
+                            let mut v = v0.replace(i, n0);
+                            // If the vector contains a NaN the result is NaN:
+                            assert!(
+                                v.sum().is_nan(),
+                                "nan at {} => {} | {:?}",
+                                i,
+                                v.sum(),
+                                v
+                            );
+                            for j in 0..i {
+                                v = v.replace(j, n0);
+                                assert!(v.sum().is_nan());
+                            }
+                        }
+                        let v = $id::splat(n0);
+                        assert!(v.sum().is_nan(), "all nans | {:?}", v);
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[allow(unreachable_code)]
+                    #[allow(unused_mut)]
+                    // ^^^ FIXME: https://github.com/rust-lang/rust/issues/55344
+                    fn product_nan() {
+                        // FIXME: https://bugs.llvm.org/show_bug.cgi?id=36732
+                        // https://github.com/rust-lang-nursery/packed_simd/issues/6
+                        return;
+
+                        let n0 = crate::$elem_ty::NAN;
+                        let v0 = $id::splat(-3.0);
+                        for i in 0..$id::lanes() {
+                            let mut v = v0.replace(i, n0);
+                            // If the vector contains a NaN the result is NaN:
+                            assert!(
+                                v.product().is_nan(),
+                                "nan at {} => {} | {:?}",
+                                i,
+                                v.product(),
+                                v
+                            );
+                            for j in 0..i {
+                                v = v.replace(j, n0);
+                                assert!(v.product().is_nan());
+                            }
+                        }
+                        let v = $id::splat(n0);
+                        assert!(v.product().is_nan(), "all nans | {:?}", v);
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[allow(unused, dead_code)]
+                    fn sum_roundoff() {
+                        // Performs a tree-reduction
+                        fn tree_reduce_sum(a: &[$elem_ty]) -> $elem_ty {
+                            assert!(!a.is_empty());
+                            if a.len() == 1 {
+                                a[0]
+                            } else if a.len() == 2 {
+                                a[0] + a[1]
+                            } else {
+                                let mid = a.len() / 2;
+                                let (left, right) = a.split_at(mid);
+                                tree_reduce_sum(left) + tree_reduce_sum(right)
+                            }
+                        }
+
+                        let mut start = crate::$elem_ty::EPSILON;
+                        let mut scalar_reduction = 0. as $elem_ty;
+
+                        let mut v = $id::splat(0. as $elem_ty);
+                        for i in 0..$id::lanes() {
+                            let c = if i % 2 == 0 { 1e3 } else { -1. };
+                            start *= 3.14 * c;
+                            scalar_reduction += start;
+                            v = v.replace(i, start);
+                        }
+                        let simd_reduction = v.sum();
+
+                        let mut a = [0. as $elem_ty; $id::lanes()];
+                        v.write_to_slice_unaligned(&mut a);
+                        let tree_reduction = tree_reduce_sum(&a);
+
+                        // tolerate 1 ULP difference:
+                        let red_bits = simd_reduction.to_bits();
+                        let tree_bits = tree_reduction.to_bits();
+                        assert!(
+                            if red_bits > tree_bits {
+                                red_bits - tree_bits
+                            } else {
+                                tree_bits - red_bits
+                            } < 2,
+                            "vector: {:?} | simd_reduction: {:?} | \
+                             tree_reduction: {} | scalar_reduction: {}",
+                            v,
+                            simd_reduction,
+                            tree_reduction,
+                            scalar_reduction
+                        );
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[allow(unused, dead_code)]
+                    fn product_roundoff() {
+                        // Performs a tree-reduction
+                        fn tree_reduce_product(a: &[$elem_ty]) -> $elem_ty {
+                            assert!(!a.is_empty());
+                            if a.len() == 1 {
+                                a[0]
+                            } else if a.len() == 2 {
+                                a[0] * a[1]
+                            } else {
+                                let mid = a.len() / 2;
+                                let (left, right) = a.split_at(mid);
+                                tree_reduce_product(left)
+                                    * tree_reduce_product(right)
+                            }
+                        }
+
+                        let mut start = crate::$elem_ty::EPSILON;
+                        let mut scalar_reduction = 1. as $elem_ty;
+
+                        let mut v = $id::splat(0. as $elem_ty);
+                        for i in 0..$id::lanes() {
+                            let c = if i % 2 == 0 { 1e3 } else { -1. };
+                            start *= 3.14 * c;
+                            scalar_reduction *= start;
+                            v = v.replace(i, start);
+                        }
+                        let simd_reduction = v.product();
+
+                        let mut a = [0. as $elem_ty; $id::lanes()];
+                        v.write_to_slice_unaligned(&mut a);
+                        let tree_reduction = tree_reduce_product(&a);
+
+                        // tolerate 1 ULP difference:
+                        let red_bits = simd_reduction.to_bits();
+                        let tree_bits = tree_reduction.to_bits();
+                        assert!(
+                            if red_bits > tree_bits {
+                                red_bits - tree_bits
+                            } else {
+                                tree_bits - red_bits
+                            } < 2,
+                            "vector: {:?} | simd_reduction: {:?} | \
+                             tree_reduction: {} | scalar_reduction: {}",
+                            v,
+                            simd_reduction,
+                            tree_reduction,
+                            scalar_reduction
+                        );
+                    }
+                }
+            }
+        }
+    };
+}
+
+
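For reference, a minimal usage sketch of the reductions this macro generates; the snippet is illustrative only (it assumes `packed_simd` as a dependency and is not part of the generated docs):

```rust
use packed_simd::f32x4;

fn main() {
    let v = f32x4::new(1.0, 2.0, 3.0, 4.0);
    // Horizontal sum, tree order: (1 + 2) + (3 + 4)
    assert_eq!(v.sum(), 10.0);
    // Horizontal product, tree order: (1 * 2) * (3 * 4)
    assert_eq!(v.product(), 24.0);

    // The `iter::Sum` impl folds lane-wise over a sequence of vectors:
    let total: f32x4 = [v; 3].iter().sum();
    assert_eq!(total, f32x4::new(3.0, 6.0, 9.0, 12.0));

    // A single NaN lane makes the horizontal reduction NaN:
    assert!(v.replace(0, std::f32::NAN).sum().is_nan());
}
```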
\ No newline at end of file diff --git a/src/packed_simd/api/reductions/integer_arithmetic.rs.html b/src/packed_simd/api/reductions/integer_arithmetic.rs.html new file mode 100644 index 000000000..a69b8a511 --- /dev/null +++ b/src/packed_simd/api/reductions/integer_arithmetic.rs.html @@ -0,0 +1,397 @@ +integer_arithmetic.rs.html -- source
+
+//! Implements portable horizontal integer vector arithmetic reductions.
+
+macro_rules! impl_reduction_integer_arithmetic {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $ielem_ty:ident
+     | $test_tt:tt) => {
+        impl $id {
+            /// Horizontal wrapping sum of the vector elements.
+            ///
+            /// The intrinsic performs a tree-reduction of the vector elements.
+            /// That is, for an 8-element vector:
+            ///
+            /// > ((x0 + x1) + (x2 + x3)) + ((x4 + x5) + (x6 + x7))
+            ///
+            /// If an operation overflows, it returns the mathematical result
+            /// modulo `2^n`, where `n` is the bit width of the element type.
+            #[inline]
+            pub fn wrapping_sum(self) -> $elem_ty {
+                #[cfg(not(target_arch = "aarch64"))]
+                {
+                    use crate::llvm::simd_reduce_add_ordered;
+                    let v: $ielem_ty = unsafe {
+                        simd_reduce_add_ordered(self.0, 0 as $ielem_ty)
+                    };
+                    v as $elem_ty
+                }
+                #[cfg(target_arch = "aarch64")]
+                {
+                    // FIXME: broken on AArch64
+                    // https://github.com/rust-lang-nursery/packed_simd/issues/15
+                    let mut x = self.extract(0) as $elem_ty;
+                    for i in 1..$id::lanes() {
+                        x = x.wrapping_add(self.extract(i) as $elem_ty);
+                    }
+                    x
+                }
+            }
+
+            /// Horizontal wrapping product of the vector elements.
+            ///
+            /// The intrinsic performs a tree-reduction of the vector elements.
+            /// That is, for an 8-element vector:
+            ///
+            /// > ((x0 * x1) * (x2 * x3)) * ((x4 * x5) * (x6 * x7))
+            ///
+            /// If an operation overflows, it returns the mathematical result
+            /// modulo `2^n`, where `n` is the bit width of the element type.
+            #[inline]
+            pub fn wrapping_product(self) -> $elem_ty {
+                #[cfg(not(target_arch = "aarch64"))]
+                {
+                    use crate::llvm::simd_reduce_mul_ordered;
+                    let v: $ielem_ty = unsafe {
+                        simd_reduce_mul_ordered(self.0, 1 as $ielem_ty)
+                    };
+                    v as $elem_ty
+                }
+                #[cfg(target_arch = "aarch64")]
+                {
+                    // FIXME: broken on AArch64
+                    // https://github.com/rust-lang-nursery/packed_simd/issues/15
+                    let mut x = self.extract(0) as $elem_ty;
+                    for i in 1..$id::lanes() {
+                        x = x.wrapping_mul(self.extract(i) as $elem_ty);
+                    }
+                    x
+                }
+            }
+        }
+
+        impl crate::iter::Sum for $id {
+            #[inline]
+            fn sum<I: Iterator<Item = $id>>(iter: I) -> $id {
+                iter.fold($id::splat(0), crate::ops::Add::add)
+            }
+        }
+
+        impl crate::iter::Product for $id {
+            #[inline]
+            fn product<I: Iterator<Item = $id>>(iter: I) -> $id {
+                iter.fold($id::splat(1), crate::ops::Mul::mul)
+            }
+        }
+
+        impl<'a> crate::iter::Sum<&'a $id> for $id {
+            #[inline]
+            fn sum<I: Iterator<Item = &'a $id>>(iter: I) -> $id {
+                iter.fold($id::splat(0), |a, b| crate::ops::Add::add(a, *b))
+            }
+        }
+
+        impl<'a> crate::iter::Product<&'a $id> for $id {
+            #[inline]
+            fn product<I: Iterator<Item = &'a $id>>(iter: I) -> $id {
+                iter.fold($id::splat(1), |a, b| crate::ops::Mul::mul(a, *b))
+            }
+        }
+
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _reduction_int_arith>] {
+                    use super::*;
+
+                    fn alternating(x: usize) -> $id {
+                        let mut v = $id::splat(1 as $elem_ty);
+                        for i in 0..$id::lanes() {
+                            if i % x == 0 {
+                                v = v.replace(i, 2 as $elem_ty);
+                            }
+                        }
+                        v
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn wrapping_sum() {
+                        let v = $id::splat(0 as $elem_ty);
+                        assert_eq!(v.wrapping_sum(), 0 as $elem_ty);
+                        let v = $id::splat(1 as $elem_ty);
+                        assert_eq!(v.wrapping_sum(), $id::lanes() as $elem_ty);
+                        let v = alternating(2);
+                        if $id::lanes() > 1 {
+                            assert_eq!(
+                                v.wrapping_sum(),
+                                ($id::lanes() / 2 + $id::lanes()) as $elem_ty
+                            );
+                        } else {
+                            assert_eq!(
+                                v.wrapping_sum(),
+                                2 as $elem_ty
+                            );
+                        }
+                    }
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn wrapping_sum_overflow() {
+                        let start = $elem_ty::max_value()
+                            - ($id::lanes() as $elem_ty / 2);
+
+                        let v = $id::splat(start as $elem_ty);
+                        let vwrapping_sum = v.wrapping_sum();
+
+                        let mut wrapping_sum = start;
+                        for _ in 1..$id::lanes() {
+                            wrapping_sum = wrapping_sum.wrapping_add(start);
+                        }
+                        assert_eq!(wrapping_sum, vwrapping_sum, "v = {:?}", v);
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn wrapping_product() {
+                        let v = $id::splat(0 as $elem_ty);
+                        assert_eq!(v.wrapping_product(), 0 as $elem_ty);
+                        let v = $id::splat(1 as $elem_ty);
+                        assert_eq!(v.wrapping_product(), 1 as $elem_ty);
+                        let f = match $id::lanes() {
+                            64 => 16,
+                            32 => 8,
+                            16 => 4,
+                            _ => 2,
+                        };
+                        let v = alternating(f);
+                        if $id::lanes() > 1 {
+                            assert_eq!(
+                                v.wrapping_product(),
+                                (2_usize.pow(($id::lanes() / f) as u32)
+                                 as $elem_ty)
+                            );
+                        } else {
+                            assert_eq!(
+                                v.wrapping_product(),
+                                2 as $elem_ty
+                            );
+                        }
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn wrapping_product_overflow() {
+                        let start = $elem_ty::max_value()
+                            - ($id::lanes() as $elem_ty / 2);
+
+                        let v = $id::splat(start as $elem_ty);
+                        let vmul = v.wrapping_product();
+
+                        let mut mul = start;
+                        for _ in 1..$id::lanes() {
+                            mul = mul.wrapping_mul(start);
+                        }
+                        assert_eq!(mul, vmul, "v = {:?}", v);
+                    }
+                }
+            }
+        }
+    };
+}
+
+
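A companion sketch for the wrapping integer reductions, showing the modulo behavior on overflow (illustrative only, assuming `packed_simd` as a dependency):

```rust
use packed_simd::u8x4;

fn main() {
    let v = u8x4::new(1, 2, 3, 4);
    assert_eq!(v.wrapping_sum(), 10);
    assert_eq!(v.wrapping_product(), 24);

    // 200 + 200 + 200 + 200 = 800 overflows `u8` and wraps
    // modulo 2^8: 800 - 3 * 256 = 32.
    assert_eq!(u8x4::splat(200).wrapping_sum(), 32);
}
```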
\ No newline at end of file diff --git a/src/packed_simd/api/reductions/mask.rs.html b/src/packed_simd/api/reductions/mask.rs.html new file mode 100644 index 000000000..29f270a74 --- /dev/null +++ b/src/packed_simd/api/reductions/mask.rs.html @@ -0,0 +1,181 @@ +mask.rs.html -- source
+
+//! Implements portable horizontal mask reductions.
+
+macro_rules! impl_reduction_mask {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Are `all` vector lanes `true`?
+            #[inline]
+            pub fn all(self) -> bool {
+                unsafe { crate::codegen::reductions::mask::All::all(self) }
+            }
+            /// Is `any` vector lane `true`?
+            #[inline]
+            pub fn any(self) -> bool {
+                unsafe { crate::codegen::reductions::mask::Any::any(self) }
+            }
+            /// Are `all` vector lanes `false`?
+            #[inline]
+            pub fn none(self) -> bool {
+                !self.any()
+            }
+        }
+
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _reduction>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn all() {
+                        let a = $id::splat(true);
+                        assert!(a.all());
+                        let a = $id::splat(false);
+                        assert!(!a.all());
+
+                        if $id::lanes() > 1 {
+                            for i in 0..$id::lanes() {
+                                let mut a = $id::splat(true);
+                                a = a.replace(i, false);
+                                assert!(!a.all());
+                                let mut a = $id::splat(false);
+                                a = a.replace(i, true);
+                                assert!(!a.all());
+                            }
+                        }
+                    }
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn any() {
+                        let a = $id::splat(true);
+                        assert!(a.any());
+                        let a = $id::splat(false);
+                        assert!(!a.any());
+
+                        if $id::lanes() > 1 {
+                            for i in 0..$id::lanes() {
+                                let mut a = $id::splat(true);
+                                a = a.replace(i, false);
+                                assert!(a.any());
+                                let mut a = $id::splat(false);
+                                a = a.replace(i, true);
+                                assert!(a.any());
+                            }
+                        }
+                    }
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn none() {
+                        let a = $id::splat(true);
+                        assert!(!a.none());
+                        let a = $id::splat(false);
+                        assert!(a.none());
+
+                        if $id::lanes() > 1 {
+                            for i in 0..$id::lanes() {
+                                let mut a = $id::splat(true);
+                                a = a.replace(i, false);
+                                assert!(!a.none());
+                                let mut a = $id::splat(false);
+                                a = a.replace(i, true);
+                                assert!(!a.none());
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+
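A short sketch of the generated mask reductions (illustrative only, assuming `packed_simd` as a dependency):

```rust
use packed_simd::m32x4;

fn main() {
    let m = m32x4::new(true, false, true, false);
    assert!(m.any());   // at least one lane is true
    assert!(!m.all());  // not every lane is true
    assert!(!m.none()); // some lane is true

    assert!(m32x4::splat(true).all());
    assert!(m32x4::splat(false).none());
}
```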
\ No newline at end of file diff --git a/src/packed_simd/api/reductions/min_max.rs.html b/src/packed_simd/api/reductions/min_max.rs.html new file mode 100644 index 000000000..661f31d41 --- /dev/null +++ b/src/packed_simd/api/reductions/min_max.rs.html @@ -0,0 +1,757 @@ +min_max.rs.html -- source
+
+//! Implements portable horizontal vector min/max reductions.
+
+macro_rules! impl_reduction_min_max {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident
+     | $ielem_ty:ident | $test_tt:tt) => {
+        impl $id {
+            /// Largest vector element value.
+            #[inline]
+            pub fn max_element(self) -> $elem_ty {
+                #[cfg(not(any(
+                    target_arch = "aarch64",
+                    target_arch = "arm",
+                    target_arch = "powerpc64",
+                    target_arch = "wasm32",
+                )))]
+                {
+                    use crate::llvm::simd_reduce_max;
+                    let v: $ielem_ty = unsafe { simd_reduce_max(self.0) };
+                    v as $elem_ty
+                }
+                #[cfg(any(
+                    target_arch = "aarch64",
+                    target_arch = "arm",
+                    target_arch = "powerpc64",
+                    target_arch = "wasm32",
+                ))]
+                {
+                    // FIXME: broken on AArch64
+                    // https://github.com/rust-lang-nursery/packed_simd/issues/15
+                    // FIXME: broken on WASM32
+                    // https://github.com/rust-lang-nursery/packed_simd/issues/91
+                    let mut x = self.extract(0);
+                    for i in 1..$id::lanes() {
+                        x = x.max(self.extract(i));
+                    }
+                    x
+                }
+            }
+
+            /// Smallest vector element value.
+            #[inline]
+            pub fn min_element(self) -> $elem_ty {
+                #[cfg(not(any(
+                    target_arch = "aarch64",
+                    target_arch = "arm",
+                    all(target_arch = "x86", not(target_feature = "sse2")),
+                    target_arch = "powerpc64",
+                    target_arch = "wasm32",
+                )))]
+                {
+                    use crate::llvm::simd_reduce_min;
+                    let v: $ielem_ty = unsafe { simd_reduce_min(self.0) };
+                    v as $elem_ty
+                }
+                #[cfg(any(
+                    target_arch = "aarch64",
+                    target_arch = "arm",
+                    all(target_arch = "x86", not(target_feature = "sse2")),
+                    target_arch = "powerpc64",
+                    target_arch = "wasm32",
+                ))]
+                {
+                    // FIXME: broken on AArch64
+                    // https://github.com/rust-lang-nursery/packed_simd/issues/15
+                    // FIXME: broken on i586-unknown-linux-gnu
+                    // https://github.com/rust-lang-nursery/packed_simd/issues/22
+                    // FIXME: broken on WASM32
+                    // https://github.com/rust-lang-nursery/packed_simd/issues/91
+                    let mut x = self.extract(0);
+                    for i in 1..$id::lanes() {
+                        x = x.min(self.extract(i));
+                    }
+                    x
+                }
+            }
+        }
+        test_if! {$test_tt:
+        paste::item! {
+            pub mod [<$id _reduction_min_max>] {
+                use super::*;
+                #[cfg_attr(not(target_arch = "wasm32"), test)]
+                #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                pub fn max_element() {
+                    let v = $id::splat(0 as $elem_ty);
+                    assert_eq!(v.max_element(), 0 as $elem_ty);
+                    if $id::lanes() > 1 {
+                        let v = v.replace(1, 1 as $elem_ty);
+                        assert_eq!(v.max_element(), 1 as $elem_ty);
+                    }
+                    let v = v.replace(0, 2 as $elem_ty);
+                    assert_eq!(v.max_element(), 2 as $elem_ty);
+                }
+
+                #[cfg_attr(not(target_arch = "wasm32"), test)]
+                #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                pub fn min_element() {
+                    let v = $id::splat(0 as $elem_ty);
+                    assert_eq!(v.min_element(), 0 as $elem_ty);
+                    if $id::lanes() > 1 {
+                        let v = v.replace(1, 1 as $elem_ty);
+                        assert_eq!(v.min_element(), 0 as $elem_ty);
+                    }
+                    let v = $id::splat(1 as $elem_ty);
+                    let v = v.replace(0, 2 as $elem_ty);
+                    if $id::lanes() > 1 {
+                        assert_eq!(v.min_element(), 1 as $elem_ty);
+                    } else {
+                        assert_eq!(v.min_element(), 2 as $elem_ty);
+                    }
+                    if $id::lanes() > 1 {
+                        let v = $id::splat(2 as $elem_ty);
+                        let v = v.replace(1, 1 as $elem_ty);
+                        assert_eq!(v.min_element(), 1 as $elem_ty);
+                    }
+                }
+            }
+        }
+        }
+    };
+}
+
+macro_rules! test_reduction_float_min_max {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _reduction_min_max_nan>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn min_element_test() {
+                        let n = crate::$elem_ty::NAN;
+
+                        assert_eq!(n.min(-3.), -3.);
+                        assert_eq!((-3. as $elem_ty).min(n), -3.);
+
+                        let v0 = $id::splat(-3.);
+
+                        let target_with_broken_last_lane_nan = !cfg!(any(
+                            target_arch = "arm", target_arch = "aarch64",
+                            all(target_arch = "x86",
+                                not(target_feature = "sse2")
+                            ),
+                            target_arch = "powerpc64",
+                            target_arch = "wasm32",
+                        ));
+
+                        // The vector is initialized to `-3.`s: [-3, -3, -3, -3]
+                        for i in 0..$id::lanes() {
+                            // We replace the i-th element of the vector with
+                            // `NaN`: [-3, -3, -3, NaN]
+                            let mut v = v0.replace(i, n);
+
+                            // If the NaN is in the last place, the LLVM
+                            // implementation of these methods is broken on some
+                            // targets:
+                            if i == $id::lanes() - 1 &&
+                                target_with_broken_last_lane_nan {
+                                // FIXME:
+                                // https://github.com/rust-lang-nursery/packed_simd/issues/5
+                                //
+                                // If there is a NaN, the result should
+                                // always be the smallest element, but
+                                // currently when the last element is NaN the
+                                // implementation incorrectly returns NaN.
+                                //
+                                // The targets mentioned above use different
+                                // codegen that produces the correct result.
+                                //
+                                // These asserts detect if this behavior changes
+                                assert!(v.min_element().is_nan(),
+                                        // FIXME: ^^^ should be -3.
+                                        "[A]: nan at {} => {} | {:?}",
+                                        i, v.min_element(), v);
+
+                                // If we replace all the elements in the vector
+                                // up-to the `i-th` lane with `NaN`s, the result
+                                // is still always `-3.` unless all elements of
+                                // the vector are `NaN`s:
+                                //
+                                // This is also broken:
+                                for j in 0..i {
+                                    v = v.replace(j, n);
+                                    assert!(v.min_element().is_nan(),
+                                            // FIXME: ^^^ should be -3.
+                                            "[B]: nan at {} => {} | {:?}",
+                                            i, v.min_element(), v);
+                                }
+
+                                // We are done here, since we were in the last
+                                // lane which is the last iteration of the loop.
+                                break
+                            }
+
+                            // We are not in the last lane, and there is only
+                            // one `NaN` in the vector.
+
+                            // If the vector has one lane, the result is `NaN`:
+                            if $id::lanes() == 1 {
+                                assert!(v.min_element().is_nan(),
+                                        "[C]: all nans | v={:?} | min={} | \
+                                         is_nan: {}",
+                                        v, v.min_element(),
+                                        v.min_element().is_nan()
+                                );
+
+                                // And we are done, since the vector only has
+                                // one lane anyways.
+                                break;
+                            }
+
+                            // The vector has more than one lane, since there is
+                            // only one `NaN` in the vector, the result is
+                            // always `-3`.
+                            assert_eq!(v.min_element(), -3.,
+                                       "[D]: nan at {} => {} | {:?}",
+                                       i, v.min_element(), v);
+
+                            // If we replace all the elements in the vector
+                            // up-to the `i-th` lane with `NaN`s, the result is
+                            // still always `-3.` unless all elements of the
+                            // vector are `NaN`s:
+                            for j in 0..i {
+                                v = v.replace(j, n);
+
+                                if i == $id::lanes() - 1 && j == i - 1 {
+                                    // All elements of the vector are `NaN`s,
+                                    // therefore the result is NaN as well.
+                                    //
+                                    // Note: the #lanes of the vector is > 1, so
+                                    // "i - 1" does not overflow.
+                                    assert!(v.min_element().is_nan(),
+                                            "[E]: all nans | v={:?} | min={} | \
+                                             is_nan: {}",
+                                            v, v.min_element(),
+                                            v.min_element().is_nan());
+                                } else {
+                                    // There are non-`NaN` elements in the
+                                    // vector, therefore the result is `-3.`:
+                                    assert_eq!(v.min_element(), -3.,
+                                               "[F]: nan at {} => {} | {:?}",
+                                               i, v.min_element(), v);
+                                }
+                            }
+                        }
+
+                        // If the vector contains all NaNs the result is NaN:
+                        assert!($id::splat(n).min_element().is_nan(),
+                                "all nans | v={:?} | min={} | is_nan: {}",
+                                $id::splat(n), $id::splat(n).min_element(),
+                                $id::splat(n).min_element().is_nan());
+                    }
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn max_element_test() {
+                        let n = crate::$elem_ty::NAN;
+
+                        assert_eq!(n.max(-3.), -3.);
+                        assert_eq!((-3. as $elem_ty).max(n), -3.);
+
+                        let v0 = $id::splat(-3.);
+
+                        let target_with_broken_last_lane_nan = !cfg!(any(
+                            target_arch = "arm", target_arch = "aarch64",
+                            target_arch = "powerpc64", target_arch = "wasm32",
+                        ));
+
+                        // The vector is initialized to `-3.`s: [-3, -3, -3, -3]
+                        for i in 0..$id::lanes() {
+                            // We replace the i-th element of the vector with
+                            // `NaN`: [-3, -3, -3, NaN]
+                            let mut v = v0.replace(i, n);
+
+                            // If the NaN is in the last place, the LLVM
+                            // implementation of these methods is broken on some
+                            // targets:
+                            if i == $id::lanes() - 1 &&
+                                target_with_broken_last_lane_nan {
+                                // FIXME:
+                                // https://github.com/rust-lang-nursery/packed_simd/issues/5
+                                //
+                                // If there is a NaN, the result should
+                                // always be the largest element, but
+                                // currently when the last element is NaN the
+                                // implementation incorrectly returns NaN.
+                                //
+                                // The targets mentioned above use different
+                                // codegen that produces the correct result.
+                                //
+                                // These asserts detect if this behavior
+                                // changes
+                                assert!(v.max_element().is_nan(),
+                                        // FIXME: ^^^ should be -3.
+                                        "[A]: nan at {} => {} | {:?}",
+                                        i, v.max_element(), v);
+
+                                // If we replace all the elements in the vector
+                                // up-to the `i-th` lane with `NaN`s, the result
+                                // is still always `-3.` unless all elements of
+                                // the vector are `NaN`s:
+                                //
+                                // This is also broken:
+                                for j in 0..i {
+                                    v = v.replace(j, n);
+                                    assert!(v.max_element().is_nan(),
+                                            // FIXME: ^^^ should be -3.
+                                            "[B]: nan at {} => {} | {:?}",
+                                            i, v.max_element(), v);
+                                }
+
+                                // We are done here, since we were in the last
+                                // lane which is the last iteration of the loop.
+                                break
+                            }
+
+                            // We are not in the last lane, and there is only
+                            // one `NaN` in the vector.
+
+                            // If the vector has one lane, the result is `NaN`:
+                            if $id::lanes() == 1 {
+                                assert!(v.max_element().is_nan(),
+                                        "[C]: all nans | v={:?} | min={} | \
+                                         is_nan: {}",
+                                        v, v.max_element(),
+                                        v.max_element().is_nan());
+
+                                // And we are done, since the vector only has
+                                // one lane anyways.
+                                break;
+                            }
+
+                            // The vector has more than one lane, since there is
+                            // only one `NaN` in the vector, the result is
+                            // always `-3`.
+                            assert_eq!(v.max_element(), -3.,
+                                       "[D]: nan at {} => {} | {:?}",
+                                       i, v.max_element(), v);
+
+                            // If we replace all the elements in the vector
+                            // up-to the `i-th` lane with `NaN`s, the result is
+                            // still always `-3.` unless all elements of the
+                            // vector are `NaN`s:
+                            for j in 0..i {
+                                v = v.replace(j, n);
+
+                                if i == $id::lanes() - 1 && j == i - 1 {
+                                    // All elements of the vector are `NaN`s,
+                                    // therefore the result is NaN as well.
+                                    //
+                                    // Note: the #lanes of the vector is > 1, so
+                                    // "i - 1" does not overflow.
+                                    assert!(v.max_element().is_nan(),
+                                            "[E]: all nans | v={:?} | max={} | \
+                                             is_nan: {}",
+                                            v, v.max_element(),
+                                            v.max_element().is_nan());
+                                } else {
+                                    // There are non-`NaN` elements in the
+                                    // vector, therefore the result is `-3.`:
+                                    assert_eq!(v.max_element(), -3.,
+                                               "[F]: nan at {} => {} | {:?}",
+                                               i, v.max_element(), v);
+                                }
+                            }
+                        }
+
+                        // If the vector contains all NaNs the result is NaN:
+                        assert!($id::splat(n).max_element().is_nan(),
+                                "all nans | v={:?} | max={} | is_nan: {}",
+                                $id::splat(n), $id::splat(n).max_element(),
+                                $id::splat(n).max_element().is_nan());
+                    }
+                }
+            }
+        }
+    }
+}
+
+
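A short sketch of the min/max element reductions; the NaN caveats exercised by the tests above apply on the affected targets (illustrative only, assuming `packed_simd` as a dependency):

```rust
use packed_simd::f32x4;

fn main() {
    let v = f32x4::new(3.0, -1.0, 7.0, 2.0);
    assert_eq!(v.max_element(), 7.0);
    assert_eq!(v.min_element(), -1.0);
}
```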
\ No newline at end of file diff --git a/src/packed_simd/api/select.rs.html b/src/packed_simd/api/select.rs.html new file mode 100644 index 000000000..fb55462d9 --- /dev/null +++ b/src/packed_simd/api/select.rs.html @@ -0,0 +1,153 @@ +select.rs.html -- source
+
+//! Implements mask's `select`.
+
+/// Implements mask select method
+macro_rules! impl_select {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Selects elements of `a` and `b` using the mask.
+            ///
+            /// The lanes of the result for which the mask is `true` contain
+            /// the values of `a`. The remaining lanes contain the values of
+            /// `b`.
+            #[inline]
+            pub fn select<T>(self, a: Simd<T>, b: Simd<T>) -> Simd<T>
+            where
+                T: sealed::SimdArray<
+                    NT = <[$elem_ty; $elem_count] as sealed::SimdArray>::NT,
+                >,
+            {
+                use crate::llvm::simd_select;
+                Simd(unsafe { simd_select(self.0, a.0, b.0) })
+            }
+        }
+
+        test_select!(bool, $id, $id, (false, true) | $test_tt);
+    };
+}
+
+macro_rules! test_select {
+    (
+        $elem_ty:ident,
+        $mask_ty:ident,
+        $vec_ty:ident,($small:expr, $large:expr) |
+        $test_tt:tt
+    ) => {
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$vec_ty _select>] {
+                    use super::*;
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn select() {
+                        let o = $small as $elem_ty;
+                        let t = $large as $elem_ty;
+
+                        let a = $vec_ty::splat(o);
+                        let b = $vec_ty::splat(t);
+                        let m = a.lt(b);
+                        assert_eq!(m.select(a, b), a);
+
+                        let m = b.lt(a);
+                        assert_eq!(m.select(b, a), a);
+
+                        let mut c = a;
+                        let mut d = b;
+                        let mut m_e = $mask_ty::splat(false);
+                        for i in 0..$vec_ty::lanes() {
+                            if i % 2 == 0 {
+                                let c_tmp = c.extract(i);
+                                c = c.replace(i, d.extract(i));
+                                d = d.replace(i, c_tmp);
+                            } else {
+                                m_e = m_e.replace(i, true);
+                            }
+                        }
+
+                        let m = c.lt(d);
+                        assert_eq!(m_e, m);
+                        assert_eq!(m.select(c, d), a);
+                    }
+                }
+            }
+        }
+    };
+}
+
+
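A short sketch of mask `select`, blending two vectors lane-wise (illustrative only, assuming `packed_simd` as a dependency):

```rust
use packed_simd::{f32x4, m32x4};

fn main() {
    let a = f32x4::splat(1.0);
    let b = f32x4::splat(2.0);
    let m = m32x4::new(true, false, true, false);
    // Lanes where the mask is true come from `a`; the rest come from `b`:
    assert_eq!(m.select(a, b), f32x4::new(1.0, 2.0, 1.0, 2.0));
}
```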
\ No newline at end of file diff --git a/src/packed_simd/api/shuffle.rs.html b/src/packed_simd/api/shuffle.rs.html new file mode 100644 index 000000000..4dee2afaa --- /dev/null +++ b/src/packed_simd/api/shuffle.rs.html @@ -0,0 +1,383 @@ +shuffle.rs.html -- source
+
+//! Implements portable vector shuffles with immediate indices.
+
+// FIXME: comprehensive tests
+// https://github.com/rust-lang-nursery/packed_simd/issues/20
+
+/// Shuffles vector elements.
+///
+/// This macro returns a new vector that contains a shuffle of the elements in
+/// one (`shuffle!(vec, [indices...])`) or two (`shuffle!(vec0, vec1,
+/// [indices...])`) input vectors.
+///
+/// `vec0` and `vec1` must have the same type, and the element type of the
+/// resulting vector is the element type of the input vectors.
+///
+/// The number of `indices` must be a power of two in the range `[2, 64]`,
+/// since currently the largest vector supported by the library has 64 lanes.
+/// The length of the resulting vector equals the number of indices provided.
+///
+/// Each index must be in the range `[0, M * N)`, where `M` is the number of
+/// input vectors (`1` or `2`) and `N` is the number of lanes of the input
+/// vectors. An index `i` in the range `[0, N)` refers to the `i`-th element
+/// of `vec0`, while an index in the range `[N, 2 * N)` refers to the
+/// `(i - N)`-th element of `vec1`.
+///
+/// # Examples
+///
+/// Shuffling elements of two vectors:
+///
+/// ```
+/// # #[macro_use]
+/// # extern crate packed_simd;
+/// # use packed_simd::*;
+/// # fn main() {
+/// // Shuffle allows reordering the elements:
+/// let x = i32x4::new(1, 2, 3, 4);
+/// let y = i32x4::new(5, 6, 7, 8);
+/// let r = shuffle!(x, y, [4, 0, 5, 1]);
+/// assert_eq!(r, i32x4::new(5, 1, 6, 2));
+///
+/// // The resulting vector can also be smaller than the input:
+/// let r = shuffle!(x, y, [1, 6]);
+/// assert_eq!(r, i32x2::new(2, 7));
+///
+/// // Or larger:
+/// let r = shuffle!(x, y, [1, 3, 4, 2, 1, 7, 2, 2]);
+/// assert_eq!(r, i32x8::new(2, 4, 5, 3, 2, 8, 3, 3));
+/// // At most 2 * the number of lanes in the input vector.
+/// # }
+/// ```
+///
+/// Shuffling elements of one vector:
+///
+/// ```
+/// # #[macro_use]
+/// # extern crate packed_simd;
+/// # use packed_simd::*;
+/// # fn main() {
+/// // Shuffle allows reordering the elements of a vector:
+/// let x = i32x4::new(1, 2, 3, 4);
+/// let r = shuffle!(x, [2, 1, 3, 0]);
+/// assert_eq!(r, i32x4::new(3, 2, 4, 1));
+///
+/// // The resulting vector can be smaller than the input:
+/// let r = shuffle!(x, [1, 3]);
+/// assert_eq!(r, i32x2::new(2, 4));
+///
+/// // Equal:
+/// let r = shuffle!(x, [1, 3, 2, 0]);
+/// assert_eq!(r, i32x4::new(2, 4, 3, 1));
+///
+/// // Or larger:
+/// let r = shuffle!(x, [1, 3, 2, 2, 1, 3, 2, 2]);
+/// assert_eq!(r, i32x8::new(2, 4, 3, 3, 2, 4, 3, 3));
+/// // At most 2 * the number of lanes in the input vector.
+/// # }
+/// ```
+#[macro_export]
+macro_rules! shuffle {
+    ($vec0:expr, $vec1:expr, [$l0:expr, $l1:expr]) => {{
+        #[allow(unused_unsafe)]
+        unsafe {
+            $crate::Simd($crate::__shuffle_vector2(
+                $vec0.0,
+                $vec1.0,
+                [$l0, $l1],
+            ))
+        }
+    }};
+    ($vec0:expr, $vec1:expr, [$l0:expr, $l1:expr, $l2:expr, $l3:expr]) => {{
+        #[allow(unused_unsafe)]
+        unsafe {
+            $crate::Simd($crate::__shuffle_vector4(
+                $vec0.0,
+                $vec1.0,
+                [$l0, $l1, $l2, $l3],
+            ))
+        }
+    }};
+    ($vec0:expr, $vec1:expr,
+     [$l0:expr, $l1:expr, $l2:expr, $l3:expr,
+      $l4:expr, $l5:expr, $l6:expr, $l7:expr]) => {{
+        #[allow(unused_unsafe)]
+        unsafe {
+            $crate::Simd($crate::__shuffle_vector8(
+                $vec0.0,
+                $vec1.0,
+                [$l0, $l1, $l2, $l3, $l4, $l5, $l6, $l7],
+            ))
+        }
+    }};
+    ($vec0:expr, $vec1:expr,
+     [$l0:expr, $l1:expr, $l2:expr, $l3:expr,
+      $l4:expr, $l5:expr, $l6:expr, $l7:expr,
+      $l8:expr, $l9:expr, $l10:expr, $l11:expr,
+      $l12:expr, $l13:expr, $l14:expr, $l15:expr]) => {{
+        #[allow(unused_unsafe)]
+        unsafe {
+            $crate::Simd($crate::__shuffle_vector16(
+                $vec0.0,
+                $vec1.0,
+                [
+                    $l0, $l1, $l2, $l3, $l4, $l5, $l6, $l7, $l8, $l9, $l10,
+                    $l11, $l12, $l13, $l14, $l15,
+                ],
+            ))
+        }
+    }};
+    ($vec0:expr, $vec1:expr,
+     [$l0:expr, $l1:expr, $l2:expr, $l3:expr,
+      $l4:expr, $l5:expr, $l6:expr, $l7:expr,
+      $l8:expr, $l9:expr, $l10:expr, $l11:expr,
+      $l12:expr, $l13:expr, $l14:expr, $l15:expr,
+      $l16:expr, $l17:expr, $l18:expr, $l19:expr,
+      $l20:expr, $l21:expr, $l22:expr, $l23:expr,
+      $l24:expr, $l25:expr, $l26:expr, $l27:expr,
+      $l28:expr, $l29:expr, $l30:expr, $l31:expr]) => {{
+        #[allow(unused_unsafe)]
+        unsafe {
+            $crate::Simd($crate::__shuffle_vector32(
+                $vec0.0,
+                $vec1.0,
+                [
+                    $l0, $l1, $l2, $l3, $l4, $l5, $l6, $l7, $l8, $l9, $l10,
+                    $l11, $l12, $l13, $l14, $l15, $l16, $l17, $l18, $l19,
+                    $l20, $l21, $l22, $l23, $l24, $l25, $l26, $l27, $l28,
+                    $l29, $l30, $l31,
+                ],
+            ))
+        }
+    }};
+    ($vec0:expr, $vec1:expr,
+     [$l0:expr, $l1:expr, $l2:expr, $l3:expr,
+      $l4:expr, $l5:expr, $l6:expr, $l7:expr,
+      $l8:expr, $l9:expr, $l10:expr, $l11:expr,
+      $l12:expr, $l13:expr, $l14:expr, $l15:expr,
+      $l16:expr, $l17:expr, $l18:expr, $l19:expr,
+      $l20:expr, $l21:expr, $l22:expr, $l23:expr,
+      $l24:expr, $l25:expr, $l26:expr, $l27:expr,
+      $l28:expr, $l29:expr, $l30:expr, $l31:expr,
+      $l32:expr, $l33:expr, $l34:expr, $l35:expr,
+      $l36:expr, $l37:expr, $l38:expr, $l39:expr,
+      $l40:expr, $l41:expr, $l42:expr, $l43:expr,
+      $l44:expr, $l45:expr, $l46:expr, $l47:expr,
+      $l48:expr, $l49:expr, $l50:expr, $l51:expr,
+      $l52:expr, $l53:expr, $l54:expr, $l55:expr,
+      $l56:expr, $l57:expr, $l58:expr, $l59:expr,
+      $l60:expr, $l61:expr, $l62:expr, $l63:expr]) => {{
+        #[allow(unused_unsafe)]
+        unsafe {
+            $crate::Simd($crate::__shuffle_vector64(
+                $vec0.0,
+                $vec1.0,
+                [
+                    $l0, $l1, $l2, $l3, $l4, $l5, $l6, $l7, $l8, $l9, $l10,
+                    $l11, $l12, $l13, $l14, $l15, $l16, $l17, $l18, $l19,
+                    $l20, $l21, $l22, $l23, $l24, $l25, $l26, $l27, $l28,
+                    $l29, $l30, $l31, $l32, $l33, $l34, $l35, $l36, $l37,
+                    $l38, $l39, $l40, $l41, $l42, $l43, $l44, $l45, $l46,
+                    $l47, $l48, $l49, $l50, $l51, $l52, $l53, $l54, $l55,
+                    $l56, $l57, $l58, $l59, $l60, $l61, $l62, $l63,
+                ],
+            ))
+        }
+     }};
+    ($vec:expr, [$($l:expr),*]) => {
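+        // `match` binds `$vec` to `v` once, so an argument expression with
+        // side effects is evaluated a single time even though `v` is used
+        // twice in the two-vector arm below.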
+        match $vec {
+            v => shuffle!(v, v, [$($l),*])
+        }
+    };
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/shuffle1_dyn.rs.html b/src/packed_simd/api/shuffle1_dyn.rs.html new file mode 100644 index 000000000..6cb2cbe0c --- /dev/null +++ b/src/packed_simd/api/shuffle1_dyn.rs.html @@ -0,0 +1,321 @@ +shuffle1_dyn.rs.html -- source
+//! Shuffle vector elements according to a dynamic vector of indices.
+
+macro_rules! impl_shuffle1_dyn {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Shuffle vector elements according to `indices`.
+            #[inline]
+            pub fn shuffle1_dyn<I>(self, indices: I) -> Self
+            where
+                Self: codegen::shuffle1_dyn::Shuffle1Dyn<Indices = I>,
+            {
+                codegen::shuffle1_dyn::Shuffle1Dyn::shuffle1_dyn(self, indices)
+            }
+        }
+    };
+}
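A minimal usage sketch of the generated method (assuming the public `packed_simd` API; the concrete `Indices` type is chosen by the crate per vector, and `u32x4` for `i32x4` is an assumption here):

```rust
use packed_simd::*;

fn main() {
    let v = i32x4::new(10, 20, 30, 40);
    // Reverse the lanes with run-time indices. Assumption: `u32x4` is
    // the `Indices` type associated with `i32x4`.
    let idx = u32x4::new(3, 2, 1, 0);
    assert_eq!(v.shuffle1_dyn(idx), i32x4::new(40, 30, 20, 10));
}
```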
+
+macro_rules! test_shuffle1_dyn {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _shuffle1_dyn>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn shuffle1_dyn() {
+                        let increasing = {
+                            let mut v = $id::splat(0 as $elem_ty);
+                            for i in 0..$id::lanes() {
+                                v = v.replace(i, i as $elem_ty);
+                            }
+                            v
+                        };
+                        let decreasing = {
+                            let mut v = $id::splat(0 as $elem_ty);
+                            for i in 0..$id::lanes() {
+                                v = v.replace(
+                                    i,
+                                    ($id::lanes() - 1 - i) as $elem_ty
+                                );
+                            }
+                            v
+                        };
+
+                        type Indices = <
+                            $id as codegen::shuffle1_dyn::Shuffle1Dyn
+                            >::Indices;
+                        let increasing_ids: Indices = increasing.cast();
+                        let decreasing_ids: Indices = decreasing.cast();
+
+                        assert_eq!(
+                            increasing.shuffle1_dyn(increasing_ids),
+                            increasing,
+                            "(i,i)=>i"
+                        );
+                        assert_eq!(
+                            decreasing.shuffle1_dyn(increasing_ids),
+                            decreasing,
+                            "(d,i)=>d"
+                        );
+                        assert_eq!(
+                            increasing.shuffle1_dyn(decreasing_ids),
+                            decreasing,
+                            "(i,d)=>d"
+                        );
+                        assert_eq!(
+                            decreasing.shuffle1_dyn(decreasing_ids),
+                            increasing,
+                            "(d,d)=>i"
+                        );
+
+                        for i in 0..$id::lanes() {
+                            let v_ids: Indices
+                                = $id::splat(i as $elem_ty).cast();
+                            assert_eq!(increasing.shuffle1_dyn(v_ids),
+                                       $id::splat(increasing.extract(i))
+                            );
+                            assert_eq!(decreasing.shuffle1_dyn(v_ids),
+                                       $id::splat(decreasing.extract(i))
+                            );
+                            assert_eq!(
+                                $id::splat(i as $elem_ty)
+                                    .shuffle1_dyn(increasing_ids),
+                                $id::splat(i as $elem_ty)
+                            );
+                            assert_eq!(
+                                $id::splat(i as $elem_ty)
+                                    .shuffle1_dyn(decreasing_ids),
+                                $id::splat(i as $elem_ty)
+                            );
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+macro_rules! test_shuffle1_dyn_mask {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _shuffle1_dyn>] {
+                    use super::*;
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn shuffle1_dyn() {
+                        // alternating = [true, false, true, false, ...]
+                        let mut alternating = $id::splat(false);
+                        for i in 0..$id::lanes() {
+                            if i % 2 == 0 {
+                                alternating = alternating.replace(i, true);
+                            }
+                        }
+
+                        type Indices = <
+                            $id as codegen::shuffle1_dyn::Shuffle1Dyn
+                            >::Indices;
+                        // even = [0, 0, 2, 2, 4, 4, ..]
+                        let even = {
+                            let mut v = Indices::splat(0);
+                            for i in 0..$id::lanes() {
+                                if i % 2 == 0 {
+                                    v = v.replace(i, (i as u8).into());
+                                } else {
+                                    v = v.replace(i, (i as u8 - 1).into());
+                                }
+                            }
+                            v
+                        };
+                        // odd = [1, 1, 3, 3, 5, 5, ...]
+                        let odd = {
+                            let mut v = Indices::splat(0);
+                            for i in 0..$id::lanes() {
+                                if i % 2 != 0 {
+                                    v = v.replace(i, (i as u8).into());
+                                } else {
+                                    v = v.replace(i, (i as u8 + 1).into());
+                                }
+                            }
+                            v
+                        };
+
+                        assert_eq!(
+                            alternating.shuffle1_dyn(even),
+                            $id::splat(true)
+                        );
+                        if $id::lanes() > 1 {
+                            assert_eq!(
+                                alternating.shuffle1_dyn(odd),
+                                $id::splat(false)
+                            );
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/slice.rs.html b/src/packed_simd/api/slice.rs.html new file mode 100644 index 000000000..090f75123 --- /dev/null +++ b/src/packed_simd/api/slice.rs.html @@ -0,0 +1,17 @@ +slice.rs.html -- source
+//! Slice from/to methods
+
+#[macro_use]
+mod from_slice;
+
+#[macro_use]
+mod write_to_slice;
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/slice/from_slice.rs.html b/src/packed_simd/api/slice/from_slice.rs.html new file mode 100644 index 000000000..23a6c2b38 --- /dev/null +++ b/src/packed_simd/api/slice/from_slice.rs.html @@ -0,0 +1,435 @@ +from_slice.rs.html -- source
+//! Implements methods to read a vector type from a slice.
+
+macro_rules! impl_slice_from_slice {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Instantiates a new vector with the values of the `slice`.
+            ///
+            /// # Panics
+            ///
+            /// If `slice.len() < Self::lanes()` or `&slice[0]` is not aligned
+            /// to an `align_of::<Self>()` boundary.
+            #[inline]
+            pub fn from_slice_aligned(slice: &[$elem_ty]) -> Self {
+                unsafe {
+                    assert!(slice.len() >= $elem_count);
+                    let target_ptr = slice.get_unchecked(0) as *const $elem_ty;
+                    assert_eq!(
+                        target_ptr
+                            .align_offset(crate::mem::align_of::<Self>()),
+                        0
+                    );
+                    Self::from_slice_aligned_unchecked(slice)
+                }
+            }
+
+            /// Instantiates a new vector with the values of the `slice`.
+            ///
+            /// # Panics
+            ///
+            /// If `slice.len() < Self::lanes()`.
+            #[inline]
+            pub fn from_slice_unaligned(slice: &[$elem_ty]) -> Self {
+                unsafe {
+                    assert!(slice.len() >= $elem_count);
+                    Self::from_slice_unaligned_unchecked(slice)
+                }
+            }
+
+            /// Instantiates a new vector with the values of the `slice`.
+            ///
+            /// # Precondition
+            ///
+            /// If `slice.len() < Self::lanes()` or `&slice[0]` is not aligned
+            /// to an `align_of::<Self>()` boundary, the behavior is undefined.
+            #[inline]
+            pub unsafe fn from_slice_aligned_unchecked(
+                slice: &[$elem_ty],
+            ) -> Self {
+                debug_assert!(slice.len() >= $elem_count);
+                let target_ptr = slice.get_unchecked(0) as *const $elem_ty;
+                debug_assert_eq!(
+                    target_ptr.align_offset(crate::mem::align_of::<Self>()),
+                    0
+                );
+
+                #[allow(clippy::cast_ptr_alignment)]
+                *(target_ptr as *const Self)
+            }
+
+            /// Instantiates a new vector with the values of the `slice`.
+            ///
+            /// # Precondition
+            ///
+            /// If `slice.len() < Self::lanes()` the behavior is undefined.
+            #[inline]
+            pub unsafe fn from_slice_unaligned_unchecked(
+                slice: &[$elem_ty],
+            ) -> Self {
+                use crate::mem::size_of;
+                debug_assert!(slice.len() >= $elem_count);
+                let target_ptr =
+                    slice.get_unchecked(0) as *const $elem_ty as *const u8;
+                let mut x = Self::splat(0 as $elem_ty);
+                let self_ptr = &mut x as *mut Self as *mut u8;
+                crate::ptr::copy_nonoverlapping(
+                    target_ptr,
+                    self_ptr,
+                    size_of::<Self>(),
+                );
+                x
+            }
+        }
+
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _slice_from_slice>] {
+                    use super::*;
+                    use crate::iter::Iterator;
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn from_slice_unaligned() {
+                        let mut unaligned = [42 as $elem_ty; $id::lanes() + 1];
+                        unaligned[0] = 0 as $elem_ty;
+                        let vec = $id::from_slice_unaligned(&unaligned[1..]);
+                        for (index, &b) in unaligned.iter().enumerate() {
+                            if index == 0 {
+                                assert_eq!(b, 0 as $elem_ty);
+                            } else {
+                                assert_eq!(b, 42 as $elem_ty);
+                                assert_eq!(b, vec.extract(index - 1));
+                            }
+                        }
+                    }
+
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn from_slice_unaligned_fail() {
+                        let mut unaligned = [42 as $elem_ty; $id::lanes() + 1];
+                        unaligned[0] = 0 as $elem_ty;
+                        // the slice is not large enough => panic
+                        let _vec = $id::from_slice_unaligned(&unaligned[2..]);
+                    }
+
+                    union A {
+                        data: [$elem_ty; 2 * $id::lanes()],
+                        _vec: $id,
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn from_slice_aligned() {
+                        let mut aligned = A {
+                            data: [0 as $elem_ty; 2 * $id::lanes()],
+                        };
+                        for i in $id::lanes()..(2 * $id::lanes()) {
+                            unsafe {
+                                aligned.data[i] = 42 as $elem_ty;
+                            }
+                        }
+
+                        let vec = unsafe {
+                            $id::from_slice_aligned(
+                                &aligned.data[$id::lanes()..]
+                            )
+                        };
+                        for (index, &b) in
+                            unsafe { aligned.data.iter().enumerate() } {
+                            if index < $id::lanes() {
+                                assert_eq!(b, 0 as $elem_ty);
+                            } else {
+                                assert_eq!(b, 42 as $elem_ty);
+                                assert_eq!(
+                                    b, vec.extract(index - $id::lanes())
+                                );
+                            }
+                        }
+                    }
+
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn from_slice_aligned_fail_lanes() {
+                        let aligned = A {
+                            data: [0 as $elem_ty; 2 * $id::lanes()],
+                        };
+                        let _vec = unsafe {
+                            $id::from_slice_aligned(
+                                &aligned.data[2 * $id::lanes()..]
+                            )
+                        };
+                    }
+
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn from_slice_aligned_fail_align() {
+                        unsafe {
+                            let aligned = A {
+                                data: [0 as $elem_ty; 2 * $id::lanes()],
+                            };
+
+                            // get a pointer to the front of data
+                            let ptr: *const $elem_ty = aligned.data.as_ptr()
+                                as *const $elem_ty;
+                            // offset pointer by one element
+                            let ptr = ptr.wrapping_add(1);
+
+                            if ptr.align_offset(
+                                crate::mem::align_of::<$id>()
+                            ) == 0 {
+                                // the pointer is properly aligned, so
+                                // from_slice_aligned won't fail here (e.g. this
+                                // can happen for i128x1). So we panic to make
+                                // the "should_panic" test pass:
+                                panic!("ok");
+                            }
+
+                            // create a slice - this is safe, because the
+                            // elements of the slice exist, are properly
+                            // initialized, and properly aligned:
+                            let s: &[$elem_ty] = slice::from_raw_parts(
+                                ptr, $id::lanes()
+                            );
+                            // this should always panic because the slice
+                            // alignment does not match the alignment
+                            // requirements for the vector type:
+                            let _vec = $id::from_slice_aligned(s);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
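A short sketch of the safe loads generated above (the types and values are illustrative):

```rust
use packed_simd::*;

fn main() {
    let data = [1.0_f32, 2.0, 3.0, 4.0, 5.0];
    // The unaligned load only requires `slice.len() >= lanes()`; the
    // aligned variant additionally asserts that `&slice[0]` lies on an
    // `align_of::<f32x4>()` boundary.
    let v = f32x4::from_slice_unaligned(&data[1..]);
    assert_eq!(v, f32x4::new(2.0, 3.0, 4.0, 5.0));
}
```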
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/slice/write_to_slice.rs.html b/src/packed_simd/api/slice/write_to_slice.rs.html new file mode 100644 index 000000000..2ccf6fbe7 --- /dev/null +++ b/src/packed_simd/api/slice/write_to_slice.rs.html @@ -0,0 +1,425 @@ +write_to_slice.rs.html -- source
+//! Implements methods to write a vector type to a slice.
+
+macro_rules! impl_slice_write_to_slice {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Writes the values of the vector to the `slice`.
+            ///
+            /// # Panics
+            ///
+            /// If `slice.len() < Self::lanes()` or `&slice[0]` is not
+            /// aligned to an `align_of::<Self>()` boundary.
+            #[inline]
+            pub fn write_to_slice_aligned(self, slice: &mut [$elem_ty]) {
+                unsafe {
+                    assert!(slice.len() >= $elem_count);
+                    let target_ptr =
+                        slice.get_unchecked_mut(0) as *mut $elem_ty;
+                    assert_eq!(
+                        target_ptr
+                            .align_offset(crate::mem::align_of::<Self>()),
+                        0
+                    );
+                    self.write_to_slice_aligned_unchecked(slice);
+                }
+            }
+
+            /// Writes the values of the vector to the `slice`.
+            ///
+            /// # Panics
+            ///
+            /// If `slice.len() < Self::lanes()`.
+            #[inline]
+            pub fn write_to_slice_unaligned(self, slice: &mut [$elem_ty]) {
+                unsafe {
+                    assert!(slice.len() >= $elem_count);
+                    self.write_to_slice_unaligned_unchecked(slice);
+                }
+            }
+
+            /// Writes the values of the vector to the `slice`.
+            ///
+            /// # Precondition
+            ///
+            /// If `slice.len() < Self::lanes()` or `&slice[0]` is not
+            /// aligned to an `align_of::<Self>()` boundary, the behavior is
+            /// undefined.
+            #[inline]
+            pub unsafe fn write_to_slice_aligned_unchecked(
+                self, slice: &mut [$elem_ty],
+            ) {
+                debug_assert!(slice.len() >= $elem_count);
+                let target_ptr = slice.get_unchecked_mut(0) as *mut $elem_ty;
+                debug_assert_eq!(
+                    target_ptr.align_offset(crate::mem::align_of::<Self>()),
+                    0
+                );
+
+                #[allow(clippy::cast_ptr_alignment)]
+                *(target_ptr as *mut Self) = self;
+            }
+
+            /// Writes the values of the vector to the `slice`.
+            ///
+            /// # Precondition
+            ///
+            /// If `slice.len() < Self::lanes()` the behavior is undefined.
+            #[inline]
+            pub unsafe fn write_to_slice_unaligned_unchecked(
+                self, slice: &mut [$elem_ty],
+            ) {
+                debug_assert!(slice.len() >= $elem_count);
+                let target_ptr =
+                    slice.get_unchecked_mut(0) as *mut $elem_ty as *mut u8;
+                let self_ptr = &self as *const Self as *const u8;
+                crate::ptr::copy_nonoverlapping(
+                    self_ptr,
+                    target_ptr,
+                    crate::mem::size_of::<Self>(),
+                );
+            }
+        }
+
+        test_if! {
+            $test_tt:
+            paste::item! {
+                pub mod [<$id _slice_write_to_slice>] {
+                    use super::*;
+                    use crate::iter::Iterator;
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn write_to_slice_unaligned() {
+                        let mut unaligned = [0 as $elem_ty; $id::lanes() + 1];
+                        let vec = $id::splat(42 as $elem_ty);
+                        vec.write_to_slice_unaligned(&mut unaligned[1..]);
+                        for (index, &b) in unaligned.iter().enumerate() {
+                            if index == 0 {
+                                assert_eq!(b, 0 as $elem_ty);
+                            } else {
+                                assert_eq!(b, 42 as $elem_ty);
+                                assert_eq!(b, vec.extract(index - 1));
+                            }
+                        }
+                    }
+
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn write_to_slice_unaligned_fail() {
+                        let mut unaligned = [0 as $elem_ty; $id::lanes() + 1];
+                        let vec = $id::splat(42 as $elem_ty);
+                        vec.write_to_slice_unaligned(&mut unaligned[2..]);
+                    }
+
+                    union A {
+                        data: [$elem_ty; 2 * $id::lanes()],
+                        _vec: $id,
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn write_to_slice_aligned() {
+                        let mut aligned = A {
+                            data: [0 as $elem_ty; 2 * $id::lanes()],
+                        };
+                        let vec = $id::splat(42 as $elem_ty);
+                        unsafe {
+                            vec.write_to_slice_aligned(
+                                &mut aligned.data[$id::lanes()..]
+                            );
+                            for (idx, &b) in aligned.data.iter().enumerate() {
+                                if idx < $id::lanes() {
+                                    assert_eq!(b, 0 as $elem_ty);
+                                } else {
+                                    assert_eq!(b, 42 as $elem_ty);
+                                    assert_eq!(
+                                        b, vec.extract(idx - $id::lanes())
+                                    );
+                                }
+                            }
+                        }
+                    }
+
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn write_to_slice_aligned_fail_lanes() {
+                        let mut aligned = A {
+                            data: [0 as $elem_ty; 2 * $id::lanes()],
+                        };
+                        let vec = $id::splat(42 as $elem_ty);
+                        unsafe {
+                            vec.write_to_slice_aligned(
+                                &mut aligned.data[2 * $id::lanes()..]
+                            )
+                        };
+                    }
+
+                    // FIXME: wasm-bindgen-test does not support #[should_panic]
+                    // #[cfg_attr(not(target_arch = "wasm32"), test)]
+                    // #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    #[cfg(not(target_arch = "wasm32"))]
+                    #[test]
+                    #[should_panic]
+                    fn write_to_slice_aligned_fail_align() {
+                        unsafe {
+                            let mut aligned = A {
+                                data: [0 as $elem_ty; 2 * $id::lanes()],
+                            };
+
+                            // get a pointer to the front of data
+                            let ptr: *mut $elem_ty
+                                = aligned.data.as_mut_ptr() as *mut $elem_ty;
+                            // offset pointer by one element
+                            let ptr = ptr.wrapping_add(1);
+
+                            if ptr.align_offset(crate::mem::align_of::<$id>())
+                                == 0 {
+                                // the pointer is properly aligned, so
+                                // write_to_slice_aligned won't fail here (e.g.
+                                // this can happen for i128x1). So we panic to
+                                // make the "should_panic" test pass:
+                                panic!("ok");
+                            }
+
+                            // create a slice - this is safe, because the
+                            // elements of the slice exist, are properly
+                            // initialized, and properly aligned:
+                            let s: &mut [$elem_ty]
+                                = slice::from_raw_parts_mut(ptr, $id::lanes());
+                            // this should always panic because the slice
+                            // alignment does not match the alignment
+                            // requirements for the vector type:
+                            let vec = $id::splat(42 as $elem_ty);
+                            vec.write_to_slice_aligned(s);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
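And the mirror image for stores, a sketch of the unaligned write (values illustrative):

```rust
use packed_simd::*;

fn main() {
    let v = i32x4::new(1, 2, 3, 4);
    let mut out = [0_i32; 6];
    // Only the length is checked here, so the destination may start at
    // any address; the aligned variant would also check alignment.
    v.write_to_slice_unaligned(&mut out[1..5]);
    assert_eq!(out, [0, 1, 2, 3, 4, 0]);
}
```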
+
+
\ No newline at end of file diff --git a/src/packed_simd/api/swap_bytes.rs.html b/src/packed_simd/api/swap_bytes.rs.html new file mode 100644 index 000000000..aa5fbbffb --- /dev/null +++ b/src/packed_simd/api/swap_bytes.rs.html @@ -0,0 +1,387 @@ +swap_bytes.rs.html -- source
+//! Horizontal swap bytes
+
+macro_rules! impl_swap_bytes {
+    ([$elem_ty:ident; $elem_count:expr]: $id:ident | $test_tt:tt) => {
+        impl $id {
+            /// Reverses the byte order of the vector.
+            #[inline]
+            pub fn swap_bytes(self) -> Self {
+                super::codegen::swap_bytes::SwapBytes::swap_bytes(self)
+            }
+
+            /// Converts self to little endian from the target's endianness.
+            ///
+            /// On little endian this is a no-op. On big endian the bytes are
+            /// swapped.
+            #[inline]
+            pub fn to_le(self) -> Self {
+                #[cfg(target_endian = "little")]
+                {
+                    self
+                }
+                #[cfg(not(target_endian = "little"))]
+                {
+                    self.swap_bytes()
+                }
+            }
+
+            /// Converts self to big endian from the target's endianness.
+            ///
+            /// On big endian this is a no-op. On little endian the bytes are
+            /// swapped.
+            #[inline]
+            pub fn to_be(self) -> Self {
+                #[cfg(target_endian = "big")]
+                {
+                    self
+                }
+                #[cfg(not(target_endian = "big"))]
+                {
+                    self.swap_bytes()
+                }
+            }
+
+            /// Converts a vector from little endian to the target's endianness.
+            ///
+            /// On little endian this is a no-op. On big endian the bytes are
+            /// swapped.
+            #[inline]
+            pub fn from_le(x: Self) -> Self {
+                #[cfg(target_endian = "little")]
+                {
+                    x
+                }
+                #[cfg(not(target_endian = "little"))]
+                {
+                    x.swap_bytes()
+                }
+            }
+
+            /// Converts a vector from big endian to the target's endianness.
+            ///
+            /// On big endian this is a no-op. On little endian the bytes are
+            /// swapped.
+            #[inline]
+            pub fn from_be(x: Self) -> Self {
+                #[cfg(target_endian = "big")]
+                {
+                    x
+                }
+                #[cfg(not(target_endian = "big"))]
+                {
+                    x.swap_bytes()
+                }
+            }
+        }
+
+        test_if! {
+            $test_tt:
+            paste::item_with_macros! {
+                pub mod [<$id _swap_bytes>] {
+                    use super::*;
+
+                    const BYTES: [u8; 64] = [
+                        0, 1, 2, 3, 4, 5, 6, 7,
+                        8, 9, 10, 11, 12, 13, 14, 15,
+                        16, 17, 18, 19, 20, 21, 22, 23,
+                        24, 25, 26, 27, 28, 29, 30, 31,
+                        32, 33, 34, 35, 36, 37, 38, 39,
+                        40, 41, 42, 43, 44, 45, 46, 47,
+                        48, 49, 50, 51, 52, 53, 54, 55,
+                        56, 57, 58, 59, 60, 61, 62, 63,
+                    ];
+
+                    macro_rules! swap {
+                        ($func: ident) => {{
+                            // catch possible future >512 vectors
+                            // catch possible future >512-bit vectors
+
+                            let mut actual = BYTES;
+                            let elems: &mut [$elem_ty] = unsafe {
+                                slice::from_raw_parts_mut(
+                                    actual.as_mut_ptr() as *mut $elem_ty,
+                                    $id::lanes(),
+                                )
+                            };
+
+                            let vec = $id::from_slice_unaligned(elems);
+                            $id::$func(vec).write_to_slice_unaligned(elems);
+
+                            actual
+                        }};
+                    }
+
+                    macro_rules! test_swap {
+                        ($func: ident) => {{
+                            let actual = swap!($func);
+                            let expected =
+                                BYTES.iter().rev()
+                                .skip(64 - crate::mem::size_of::<$id>());
+                            assert!(actual.iter().zip(expected)
+                                    .all(|(x, y)| x == y));
+                        }};
+                    }
+
+                    macro_rules! test_no_swap {
+                        ($func: ident) => {{
+                            let actual = swap!($func);
+                            let expected = BYTES.iter()
+                                .take(mem::size_of::<$id>());
+
+                            assert!(actual.iter().zip(expected)
+                                    .all(|(x, y)| x == y));
+                        }};
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn swap_bytes() {
+                        test_swap!(swap_bytes);
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn to_le() {
+                        #[cfg(target_endian = "little")]
+                        {
+                            test_no_swap!(to_le);
+                        }
+                        #[cfg(not(target_endian = "little"))]
+                        {
+                            test_swap!(to_le);
+                        }
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn to_be() {
+                        #[cfg(target_endian = "big")]
+                        {
+                            test_no_swap!(to_be);
+                        }
+                        #[cfg(not(target_endian = "big"))]
+                        {
+                            test_swap!(to_be);
+                        }
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn from_le() {
+                        #[cfg(target_endian = "little")]
+                        {
+                            test_no_swap!(from_le);
+                        }
+                        #[cfg(not(target_endian = "little"))]
+                        {
+                            test_swap!(from_le);
+                        }
+                    }
+
+                    #[cfg_attr(not(target_arch = "wasm32"), test)] #[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
+                    fn from_be() {
+                        #[cfg(target_endian = "big")]
+                        {
+                            test_no_swap!(from_be);
+                        }
+                        #[cfg(not(target_endian = "big"))]
+                        {
+                            test_swap!(from_be);
+                        }
+                    }
+                }
+            }
+        }
+    };
+}
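A sketch of the endianness helpers generated above; these round-trip identities hold on both little- and big-endian targets because a byte swap is an involution:

```rust
use packed_simd::*;

fn main() {
    let v = u32x4::new(1, 2, 3, 4);
    // `to_le`/`from_le` are no-ops on little-endian targets and byte
    // swaps on big-endian ones, so each pair composes to the identity.
    assert_eq!(u32x4::from_le(v.to_le()), v);
    assert_eq!(u32x4::from_be(v.to_be()), v);
    // Swapping bytes twice restores the original vector.
    assert_eq!(v.swap_bytes().swap_bytes(), v);
}
```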
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen.rs.html b/src/packed_simd/codegen.rs.html new file mode 100644 index 000000000..a6bf2443e --- /dev/null +++ b/src/packed_simd/codegen.rs.html @@ -0,0 +1,127 @@ +codegen.rs.html -- source
+//! Code-generation utilities
+
+crate mod bit_manip;
+crate mod llvm;
+crate mod math;
+crate mod reductions;
+crate mod shuffle;
+crate mod shuffle1_dyn;
+crate mod swap_bytes;
+
+macro_rules! impl_simd_array {
+    ([$elem_ty:ident; $elem_count:expr]:
+     $tuple_id:ident | $($elem_tys:ident),*) => {
+        #[derive(Copy, Clone)]
+        #[repr(simd)]
+        pub struct $tuple_id($(crate $elem_tys),*);
+        //^^^^^^^ leaked through SimdArray
+
+        impl crate::sealed::Seal for [$elem_ty; $elem_count] {}
+
+        impl crate::sealed::SimdArray for [$elem_ty; $elem_count] {
+            type Tuple = $tuple_id;
+            type T = $elem_ty;
+            const N: usize = $elem_count;
+            type NT = [u32; $elem_count];
+        }
+
+        impl crate::sealed::Seal for $tuple_id {}
+        impl crate::sealed::Simd for $tuple_id {
+            type Element = $elem_ty;
+            const LANES: usize = $elem_count;
+            type LanesType = [u32; $elem_count];
+        }
+
+    }
+}
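For orientation, roughly what one invocation of `impl_simd_array!` expands to; the names below are illustrative, not taken from the crate:

```rust
// impl_simd_array!([f32; 4]: F32x4Tuple | f32, f32, f32, f32);
// expands approximately to:
#[derive(Copy, Clone)]
#[repr(simd)] // the compiler lays this tuple out as a SIMD register
pub struct F32x4Tuple(f32, f32, f32, f32);
// ...plus `sealed::Seal`/`SimdArray`/`Simd` impls that associate
// `[f32; 4]` with `F32x4Tuple`, its element type `f32`, and `LANES = 4`.
```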
+
+crate mod pointer_sized_int;
+
+crate mod v16;
+crate use self::v16::*;
+
+crate mod v32;
+crate use self::v32::*;
+
+crate mod v64;
+crate use self::v64::*;
+
+crate mod v128;
+crate use self::v128::*;
+
+crate mod v256;
+crate use self::v256::*;
+
+crate mod v512;
+crate use self::v512::*;
+
+crate mod vSize;
+crate use self::vSize::*;
+
+crate mod vPtr;
+crate use self::vPtr::*;
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/bit_manip.rs.html b/src/packed_simd/codegen/bit_manip.rs.html new file mode 100644 index 000000000..a473ca511 --- /dev/null +++ b/src/packed_simd/codegen/bit_manip.rs.html @@ -0,0 +1,711 @@ +bit_manip.rs.html -- source
+//! LLVM bit manipulation intrinsics.
+#![rustfmt::skip]
+
+use crate::*;
+
+#[allow(improper_ctypes, dead_code)]
+extern "C" {
+    #[link_name = "llvm.ctlz.v2i8"]
+    fn ctlz_u8x2(x: u8x2, is_zero_undef: bool) -> u8x2;
+    #[link_name = "llvm.ctlz.v4i8"]
+    fn ctlz_u8x4(x: u8x4, is_zero_undef: bool) -> u8x4;
+    #[link_name = "llvm.ctlz.v8i8"]
+    fn ctlz_u8x8(x: u8x8, is_zero_undef: bool) -> u8x8;
+    #[link_name = "llvm.ctlz.v16i8"]
+    fn ctlz_u8x16(x: u8x16, is_zero_undef: bool) -> u8x16;
+    #[link_name = "llvm.ctlz.v32i8"]
+    fn ctlz_u8x32(x: u8x32, is_zero_undef: bool) -> u8x32;
+    #[link_name = "llvm.ctlz.v64i8"]
+    fn ctlz_u8x64(x: u8x64, is_zero_undef: bool) -> u8x64;
+
+    #[link_name = "llvm.ctlz.v2i16"]
+    fn ctlz_u16x2(x: u16x2, is_zero_undef: bool) -> u16x2;
+    #[link_name = "llvm.ctlz.v4i16"]
+    fn ctlz_u16x4(x: u16x4, is_zero_undef: bool) -> u16x4;
+    #[link_name = "llvm.ctlz.v8i16"]
+    fn ctlz_u16x8(x: u16x8, is_zero_undef: bool) -> u16x8;
+    #[link_name = "llvm.ctlz.v16i16"]
+    fn ctlz_u16x16(x: u16x16, is_zero_undef: bool) -> u16x16;
+    #[link_name = "llvm.ctlz.v32i16"]
+    fn ctlz_u16x32(x: u16x32, is_zero_undef: bool) -> u16x32;
+
+    #[link_name = "llvm.ctlz.v2i32"]
+    fn ctlz_u32x2(x: u32x2, is_zero_undef: bool) -> u32x2;
+    #[link_name = "llvm.ctlz.v4i32"]
+    fn ctlz_u32x4(x: u32x4, is_zero_undef: bool) -> u32x4;
+    #[link_name = "llvm.ctlz.v8i32"]
+    fn ctlz_u32x8(x: u32x8, is_zero_undef: bool) -> u32x8;
+    #[link_name = "llvm.ctlz.v16i32"]
+    fn ctlz_u32x16(x: u32x16, is_zero_undef: bool) -> u32x16;
+
+    #[link_name = "llvm.ctlz.v2i64"]
+    fn ctlz_u64x2(x: u64x2, is_zero_undef: bool) -> u64x2;
+    #[link_name = "llvm.ctlz.v4i64"]
+    fn ctlz_u64x4(x: u64x4, is_zero_undef: bool) -> u64x4;
+    #[link_name = "llvm.ctlz.v8i64"]
+    fn ctlz_u64x8(x: u64x8, is_zero_undef: bool) -> u64x8;
+
+    #[link_name = "llvm.ctlz.v1i128"]
+    fn ctlz_u128x1(x: u128x1, is_zero_undef: bool) -> u128x1;
+    #[link_name = "llvm.ctlz.v2i128"]
+    fn ctlz_u128x2(x: u128x2, is_zero_undef: bool) -> u128x2;
+    #[link_name = "llvm.ctlz.v4i128"]
+    fn ctlz_u128x4(x: u128x4, is_zero_undef: bool) -> u128x4;
+
+    #[link_name = "llvm.cttz.v2i8"]
+    fn cttz_u8x2(x: u8x2, is_zero_undef: bool) -> u8x2;
+    #[link_name = "llvm.cttz.v4i8"]
+    fn cttz_u8x4(x: u8x4, is_zero_undef: bool) -> u8x4;
+    #[link_name = "llvm.cttz.v8i8"]
+    fn cttz_u8x8(x: u8x8, is_zero_undef: bool) -> u8x8;
+    #[link_name = "llvm.cttz.v16i8"]
+    fn cttz_u8x16(x: u8x16, is_zero_undef: bool) -> u8x16;
+    #[link_name = "llvm.cttz.v32i8"]
+    fn cttz_u8x32(x: u8x32, is_zero_undef: bool) -> u8x32;
+    #[link_name = "llvm.cttz.v64i8"]
+    fn cttz_u8x64(x: u8x64, is_zero_undef: bool) -> u8x64;
+
+    #[link_name = "llvm.cttz.v2i16"]
+    fn cttz_u16x2(x: u16x2, is_zero_undef: bool) -> u16x2;
+    #[link_name = "llvm.cttz.v4i16"]
+    fn cttz_u16x4(x: u16x4, is_zero_undef: bool) -> u16x4;
+    #[link_name = "llvm.cttz.v8i16"]
+    fn cttz_u16x8(x: u16x8, is_zero_undef: bool) -> u16x8;
+    #[link_name = "llvm.cttz.v16i16"]
+    fn cttz_u16x16(x: u16x16, is_zero_undef: bool) -> u16x16;
+    #[link_name = "llvm.cttz.v32i16"]
+    fn cttz_u16x32(x: u16x32, is_zero_undef: bool) -> u16x32;
+
+    #[link_name = "llvm.cttz.v2i32"]
+    fn cttz_u32x2(x: u32x2, is_zero_undef: bool) -> u32x2;
+    #[link_name = "llvm.cttz.v4i32"]
+    fn cttz_u32x4(x: u32x4, is_zero_undef: bool) -> u32x4;
+    #[link_name = "llvm.cttz.v8i32"]
+    fn cttz_u32x8(x: u32x8, is_zero_undef: bool) -> u32x8;
+    #[link_name = "llvm.cttz.v16i32"]
+    fn cttz_u32x16(x: u32x16, is_zero_undef: bool) -> u32x16;
+
+    #[link_name = "llvm.cttz.v2i64"]
+    fn cttz_u64x2(x: u64x2, is_zero_undef: bool) -> u64x2;
+    #[link_name = "llvm.cttz.v4i64"]
+    fn cttz_u64x4(x: u64x4, is_zero_undef: bool) -> u64x4;
+    #[link_name = "llvm.cttz.v8i64"]
+    fn cttz_u64x8(x: u64x8, is_zero_undef: bool) -> u64x8;
+
+    #[link_name = "llvm.cttz.v1i128"]
+    fn cttz_u128x1(x: u128x1, is_zero_undef: bool) -> u128x1;
+    #[link_name = "llvm.cttz.v2i128"]
+    fn cttz_u128x2(x: u128x2, is_zero_undef: bool) -> u128x2;
+    #[link_name = "llvm.cttz.v4i128"]
+    fn cttz_u128x4(x: u128x4, is_zero_undef: bool) -> u128x4;
+
+    #[link_name = "llvm.ctpop.v2i8"]
+    fn ctpop_u8x2(x: u8x2) -> u8x2;
+    #[link_name = "llvm.ctpop.v4i8"]
+    fn ctpop_u8x4(x: u8x4) -> u8x4;
+    #[link_name = "llvm.ctpop.v8i8"]
+    fn ctpop_u8x8(x: u8x8) -> u8x8;
+    #[link_name = "llvm.ctpop.v16i8"]
+    fn ctpop_u8x16(x: u8x16) -> u8x16;
+    #[link_name = "llvm.ctpop.v32i8"]
+    fn ctpop_u8x32(x: u8x32) -> u8x32;
+    #[link_name = "llvm.ctpop.v64i8"]
+    fn ctpop_u8x64(x: u8x64) -> u8x64;
+
+    #[link_name = "llvm.ctpop.v2i16"]
+    fn ctpop_u16x2(x: u16x2) -> u16x2;
+    #[link_name = "llvm.ctpop.v4i16"]
+    fn ctpop_u16x4(x: u16x4) -> u16x4;
+    #[link_name = "llvm.ctpop.v8i16"]
+    fn ctpop_u16x8(x: u16x8) -> u16x8;
+    #[link_name = "llvm.ctpop.v16i16"]
+    fn ctpop_u16x16(x: u16x16) -> u16x16;
+    #[link_name = "llvm.ctpop.v32i16"]
+    fn ctpop_u16x32(x: u16x32) -> u16x32;
+
+    #[link_name = "llvm.ctpop.v2i32"]
+    fn ctpop_u32x2(x: u32x2) -> u32x2;
+    #[link_name = "llvm.ctpop.v4i32"]
+    fn ctpop_u32x4(x: u32x4) -> u32x4;
+    #[link_name = "llvm.ctpop.v8i32"]
+    fn ctpop_u32x8(x: u32x8) -> u32x8;
+    #[link_name = "llvm.ctpop.v16i32"]
+    fn ctpop_u32x16(x: u32x16) -> u32x16;
+
+    #[link_name = "llvm.ctpop.v2i64"]
+    fn ctpop_u64x2(x: u64x2) -> u64x2;
+    #[link_name = "llvm.ctpop.v4i64"]
+    fn ctpop_u64x4(x: u64x4) -> u64x4;
+    #[link_name = "llvm.ctpop.v8i64"]
+    fn ctpop_u64x8(x: u64x8) -> u64x8;
+
+    #[link_name = "llvm.ctpop.v1i128"]
+    fn ctpop_u128x1(x: u128x1) -> u128x1;
+    #[link_name = "llvm.ctpop.v2i128"]
+    fn ctpop_u128x2(x: u128x2) -> u128x2;
+    #[link_name = "llvm.ctpop.v4i128"]
+    fn ctpop_u128x4(x: u128x4) -> u128x4;
+}
+
+crate trait BitManip {
+    fn ctpop(self) -> Self;
+    fn ctlz(self) -> Self;
+    fn cttz(self) -> Self;
+}
+
+macro_rules! impl_bit_manip {
+    (inner: $ty:ident, $scalar:ty, $uty:ident,
+     $ctpop:ident, $ctlz:ident, $cttz:ident) => {
+        // FIXME: several LLVM intrinsics break on s390x https://github.com/rust-lang-nursery/packed_simd/issues/192
+        #[cfg(target_arch = "s390x")]
+        impl_bit_manip! { scalar: $ty, $scalar }
+        #[cfg(not(target_arch = "s390x"))]
+        impl BitManip for $ty {
+            #[inline]
+            fn ctpop(self) -> Self {
+                let y: $uty = self.cast();
+                unsafe { $ctpop(y).cast() }
+            }
+
+            #[inline]
+            fn ctlz(self) -> Self {
+                let y: $uty = self.cast();
+                // the ctlz/cttz intrinsics need a compile-time constant
+                // `is_zero_undef`
+                unsafe { $ctlz(y, false).cast() }
+            }
+
+            #[inline]
+            fn cttz(self) -> Self {
+                let y: $uty = self.cast();
+                unsafe { $cttz(y, false).cast() }
+            }
+        }
+    };
+    (sized_inner: $ty:ident, $scalar:ty, $uty:ident) => {
+        #[cfg(target_arch = "s390x")]
+        impl_bit_manip! { scalar: $ty, $scalar }
+        #[cfg(not(target_arch = "s390x"))]
+        impl BitManip for $ty {
+            #[inline]
+            fn ctpop(self) -> Self {
+                let y: $uty = self.cast();
+                $uty::ctpop(y).cast()
+            }
+
+            #[inline]
+            fn ctlz(self) -> Self {
+                let y: $uty = self.cast();
+                $uty::ctlz(y).cast()
+            }
+
+            #[inline]
+            fn cttz(self) -> Self {
+                let y: $uty = self.cast();
+                $uty::cttz(y).cast()
+            }
+        }
+    };
+    (scalar: $ty:ident, $scalar:ty) => {
+        impl BitManip for $ty {
+            #[inline]
+            fn ctpop(self) -> Self {
+                let mut ones = self;
+                for i in 0..Self::lanes() {
+                    ones = ones
+                        .replace(i, self.extract(i).count_ones() as $scalar);
+                }
+                ones
+            }
+
+            #[inline]
+            fn ctlz(self) -> Self {
+                let mut lz = self;
+                for i in 0..Self::lanes() {
+                    lz = lz.replace(
+                        i,
+                        self.extract(i).leading_zeros() as $scalar,
+                    );
+                }
+                lz
+            }
+
+            #[inline]
+            fn cttz(self) -> Self {
+                let mut tz = self;
+                for i in 0..Self::lanes() {
+                    tz = tz.replace(
+                        i,
+                        self.extract(i).trailing_zeros() as $scalar,
+                    );
+                }
+                tz
+            }
+        }
+    };
+    ($uty:ident, $uscalar:ty, $ity:ident, $iscalar:ty,
+     $ctpop:ident, $ctlz:ident, $cttz:ident) => {
+        impl_bit_manip! { inner: $uty, $uscalar, $uty, $ctpop, $ctlz, $cttz }
+        impl_bit_manip! { inner: $ity, $iscalar, $uty, $ctpop, $ctlz, $cttz }
+    };
+    (sized: $usize:ident, $uscalar:ty, $isize:ident,
+     $iscalar:ty, $ty:ident) => {
+        impl_bit_manip! { sized_inner: $usize, $uscalar, $ty }
+        impl_bit_manip! { sized_inner: $isize, $iscalar, $ty }
+    };
+}
+
+impl_bit_manip! { u8x2   ,   u8, i8x2, i8,   ctpop_u8x2,   ctlz_u8x2,   cttz_u8x2   }
+impl_bit_manip! { u8x4   ,   u8, i8x4, i8,   ctpop_u8x4,   ctlz_u8x4,   cttz_u8x4   }
+#[cfg(not(target_arch = "aarch64"))] // see below
+impl_bit_manip! { u8x8   ,   u8, i8x8, i8,   ctpop_u8x8,   ctlz_u8x8,   cttz_u8x8   }
+impl_bit_manip! { u8x16  ,  u8, i8x16, i8,  ctpop_u8x16,  ctlz_u8x16,  cttz_u8x16  }
+impl_bit_manip! { u8x32  ,  u8, i8x32, i8,  ctpop_u8x32,  ctlz_u8x32,  cttz_u8x32  }
+impl_bit_manip! { u8x64  ,  u8, i8x64, i8,  ctpop_u8x64,  ctlz_u8x64,  cttz_u8x64  }
+impl_bit_manip! { u16x2  ,  u16, i16x2, i16,  ctpop_u16x2,  ctlz_u16x2,  cttz_u16x2  }
+impl_bit_manip! { u16x4  ,  u16, i16x4, i16,  ctpop_u16x4,  ctlz_u16x4,  cttz_u16x4  }
+impl_bit_manip! { u16x8  ,  u16, i16x8, i16,  ctpop_u16x8,  ctlz_u16x8,  cttz_u16x8  }
+impl_bit_manip! { u16x16 , u16, i16x16, i16, ctpop_u16x16, ctlz_u16x16, cttz_u16x16 }
+impl_bit_manip! { u16x32 , u16, i16x32, i16, ctpop_u16x32, ctlz_u16x32, cttz_u16x32 }
+impl_bit_manip! { u32x2  ,  u32, i32x2, i32,  ctpop_u32x2,  ctlz_u32x2,  cttz_u32x2  }
+impl_bit_manip! { u32x4  ,  u32, i32x4, i32,  ctpop_u32x4,  ctlz_u32x4,  cttz_u32x4  }
+impl_bit_manip! { u32x8  ,  u32, i32x8, i32,  ctpop_u32x8,  ctlz_u32x8,  cttz_u32x8  }
+impl_bit_manip! { u32x16 , u32, i32x16, i32, ctpop_u32x16, ctlz_u32x16, cttz_u32x16 }
+impl_bit_manip! { u64x2  ,  u64, i64x2, i64,  ctpop_u64x2,  ctlz_u64x2,  cttz_u64x2  }
+impl_bit_manip! { u64x4  ,  u64, i64x4, i64,  ctpop_u64x4,  ctlz_u64x4,  cttz_u64x4  }
+impl_bit_manip! { u64x8  ,  u64, i64x8, i64,  ctpop_u64x8,  ctlz_u64x8,  cttz_u64x8  }
+impl_bit_manip! { u128x1 , u128, i128x1, i128, ctpop_u128x1, ctlz_u128x1, cttz_u128x1 }
+impl_bit_manip! { u128x2 , u128, i128x2, i128, ctpop_u128x2, ctlz_u128x2, cttz_u128x2 }
+impl_bit_manip! { u128x4 , u128, i128x4, i128, ctpop_u128x4, ctlz_u128x4, cttz_u128x4 }
+
+#[cfg(target_arch = "aarch64")]
+impl BitManip for u8x8 {
+    #[inline]
+    fn ctpop(self) -> Self {
+        let y: u8x8 = self.cast();
+        unsafe { ctpop_u8x8(y).cast() }
+    }
+
+    #[inline]
+    fn ctlz(self) -> Self {
+        let y: u8x8 = self.cast();
+        unsafe { ctlz_u8x8(y, false).cast() }
+    }
+
+    #[inline]
+    fn cttz(self) -> Self {
+        // FIXME: LLVM cttz.v8i8 broken on aarch64 https://github.com/rust-lang-nursery/packed_simd/issues/191
+        // OPTIMIZE: adapt the algorithm used for v8i16/etc to Rust's aarch64
+        // intrinsics
+        let mut tz = self;
+        for i in 0..Self::lanes() {
+            tz = tz.replace(i, self.extract(i).trailing_zeros() as u8);
+        }
+        tz
+    }
+}
+#[cfg(target_arch = "aarch64")]
+impl BitManip for i8x8 {
+    #[inline]
+    fn ctpop(self) -> Self {
+        let y: u8x8 = self.cast();
+        unsafe { ctpop_u8x8(y).cast() }
+    }
+
+    #[inline]
+    fn ctlz(self) -> Self {
+        let y: u8x8 = self.cast();
+        unsafe { ctlz_u8x8(y, false).cast() }
+    }
+
+    #[inline]
+    fn cttz(self) -> Self {
+        // FIXME: LLVM cttz.v8i8 broken on aarch64 https://github.com/rust-lang-nursery/packed_simd/issues/191
+        // OPTIMIZE: adapt the algorithm used for v8i16/etc to Rust's aarch64
+        // intrinsics
+        let mut tz = self;
+        for i in 0..Self::lanes() {
+            tz = tz.replace(i, self.extract(i).trailing_zeros() as i8);
+        }
+        tz
+    }
+}
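+
+// A minimal sanity sketch for the scalar fallback above (a hypothetical test,
+// not part of the original file): `cttz` must agree lane-wise with
+// `trailing_zeros`, including all-zero lanes, which yield the bit width.
+#[cfg(all(test, target_arch = "aarch64"))]
+mod cttz_fallback_sanity {
+    use super::*;
+
+    #[test]
+    fn u8x8_cttz_matches_scalar() {
+        let v = u8x8::new(0, 1, 2, 4, 8, 3, 0x80, 0xff);
+        let tz = v.cttz();
+        for i in 0..u8x8::lanes() {
+            assert_eq!(tz.extract(i), v.extract(i).trailing_zeros() as u8);
+        }
+    }
+}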
+
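+// Pointer-sized vectors have no dedicated intrinsics: dispatch `usizexN` and
+// `isizexN` to the fixed-width implementation matching the target's pointer
+// width.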
+cfg_if! {
+    if #[cfg(target_pointer_width = "8")] {
+        impl_bit_manip! { sized: usizex2, usize, isizex2, isize, u8x2 }
+        impl_bit_manip! { sized: usizex4, usize, isizex4, isize, u8x4 }
+        impl_bit_manip! { sized: usizex8, usize, isizex8, isize, u8x8 }
+    } else if #[cfg(target_pointer_width = "16")] {
+        impl_bit_manip! { sized: usizex2, usize, isizex2, isize, u16x2 }
+        impl_bit_manip! { sized: usizex4, usize, isizex4, isize, u16x4 }
+        impl_bit_manip! { sized: usizex8, usize, isizex8, isize, u16x8 }
+    } else if #[cfg(target_pointer_width = "32")] {
+        impl_bit_manip! { sized: usizex2, usize, isizex2, isize, u32x2 }
+        impl_bit_manip! { sized: usizex4, usize, isizex4, isize, u32x4 }
+        impl_bit_manip! { sized: usizex8, usize, isizex8, isize, u32x8 }
+    } else if #[cfg(target_pointer_width = "64")] {
+        impl_bit_manip! { sized: usizex2, usize, isizex2, isize, u64x2 }
+        impl_bit_manip! { sized: usizex4, usize, isizex4, isize, u64x4 }
+        impl_bit_manip! { sized: usizex8, usize, isizex8, isize, u64x8 }
+    } else {
+        compile_error!("unsupported target_pointer_width");
+    }
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/llvm.rs.html b/src/packed_simd/codegen/llvm.rs.html new file mode 100644 index 000000000..a344a49a9 --- /dev/null +++ b/src/packed_simd/codegen/llvm.rs.html @@ -0,0 +1,205 @@ +llvm.rs.html -- source
+//! LLVM's platform intrinsics
+#![allow(dead_code)]
+
+use crate::sealed::Shuffle;
+#[allow(unused_imports)] // FIXME: spurious warning?
+use crate::sealed::Simd;
+
+// Shuffle intrinsics: expanded in users' crates, therefore public.
+extern "platform-intrinsic" {
+    // FIXME: Passing these intrinsics an `idx` array with an out-of-bounds
+    // index produces a monomorphization-time error.
+    // https://github.com/rust-lang-nursery/packed_simd/issues/21
+    pub fn simd_shuffle2<T, U>(x: T, y: T, idx: [u32; 2]) -> U
+    where
+        T: Simd,
+        <T as Simd>::Element: Shuffle<[u32; 2], Output = U>;
+
+    pub fn simd_shuffle4<T, U>(x: T, y: T, idx: [u32; 4]) -> U
+    where
+        T: Simd,
+        <T as Simd>::Element: Shuffle<[u32; 4], Output = U>;
+
+    pub fn simd_shuffle8<T, U>(x: T, y: T, idx: [u32; 8]) -> U
+    where
+        T: Simd,
+        <T as Simd>::Element: Shuffle<[u32; 8], Output = U>;
+
+    pub fn simd_shuffle16<T, U>(x: T, y: T, idx: [u32; 16]) -> U
+    where
+        T: Simd,
+        <T as Simd>::Element: Shuffle<[u32; 16], Output = U>;
+
+    pub fn simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
+    where
+        T: Simd,
+        <T as Simd>::Element: Shuffle<[u32; 32], Output = U>;
+
+    pub fn simd_shuffle64<T, U>(x: T, y: T, idx: [u32; 64]) -> U
+    where
+        T: Simd,
+        <T as Simd>::Element: Shuffle<[u32; 64], Output = U>;
+}
+
+pub use self::simd_shuffle16 as __shuffle_vector16;
+pub use self::simd_shuffle2 as __shuffle_vector2;
+pub use self::simd_shuffle32 as __shuffle_vector32;
+pub use self::simd_shuffle4 as __shuffle_vector4;
+pub use self::simd_shuffle64 as __shuffle_vector64;
+pub use self::simd_shuffle8 as __shuffle_vector8;
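+
+// Index semantics sketch (illustrative; user code goes through the crate's
+// `shuffle!` macro rather than calling these intrinsics directly): `idx`
+// selects lanes from the 2N-lane concatenation of `x` and `y`, so indices
+// `0..N` pick from `x` and `N..2N` pick from `y`:
+//
+//     let x = i32x4::new(0, 1, 2, 3);
+//     let y = i32x4::new(4, 5, 6, 7);
+//     let r: i32x4 = shuffle!(x, y, [0, 4, 1, 5]); // => [0, 4, 1, 5]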
+
+extern "platform-intrinsic" {
+    crate fn simd_eq<T, U>(x: T, y: T) -> U;
+    crate fn simd_ne<T, U>(x: T, y: T) -> U;
+    crate fn simd_lt<T, U>(x: T, y: T) -> U;
+    crate fn simd_le<T, U>(x: T, y: T) -> U;
+    crate fn simd_gt<T, U>(x: T, y: T) -> U;
+    crate fn simd_ge<T, U>(x: T, y: T) -> U;
+
+    crate fn simd_insert<T, U>(x: T, idx: u32, val: U) -> T;
+    crate fn simd_extract<T, U>(x: T, idx: u32) -> U;
+
+    crate fn simd_cast<T, U>(x: T) -> U;
+
+    crate fn simd_add<T>(x: T, y: T) -> T;
+    crate fn simd_sub<T>(x: T, y: T) -> T;
+    crate fn simd_mul<T>(x: T, y: T) -> T;
+    crate fn simd_div<T>(x: T, y: T) -> T;
+    crate fn simd_rem<T>(x: T, y: T) -> T;
+    crate fn simd_shl<T>(x: T, y: T) -> T;
+    crate fn simd_shr<T>(x: T, y: T) -> T;
+    crate fn simd_and<T>(x: T, y: T) -> T;
+    crate fn simd_or<T>(x: T, y: T) -> T;
+    crate fn simd_xor<T>(x: T, y: T) -> T;
+
+    crate fn simd_reduce_add_unordered<T, U>(x: T) -> U;
+    crate fn simd_reduce_mul_unordered<T, U>(x: T) -> U;
+    crate fn simd_reduce_add_ordered<T, U>(x: T, acc: U) -> U;
+    crate fn simd_reduce_mul_ordered<T, U>(x: T, acc: U) -> U;
+    crate fn simd_reduce_min<T, U>(x: T) -> U;
+    crate fn simd_reduce_max<T, U>(x: T) -> U;
+    crate fn simd_reduce_min_nanless<T, U>(x: T) -> U;
+    crate fn simd_reduce_max_nanless<T, U>(x: T) -> U;
+    crate fn simd_reduce_and<T, U>(x: T) -> U;
+    crate fn simd_reduce_or<T, U>(x: T) -> U;
+    crate fn simd_reduce_xor<T, U>(x: T) -> U;
+    crate fn simd_reduce_all<T>(x: T) -> bool;
+    crate fn simd_reduce_any<T>(x: T) -> bool;
+
+    crate fn simd_select<M, T>(m: M, a: T, b: T) -> T;
+
+    crate fn simd_fmin<T>(a: T, b: T) -> T;
+    crate fn simd_fmax<T>(a: T, b: T) -> T;
+
+    crate fn simd_fsqrt<T>(a: T) -> T;
+    crate fn simd_fma<T>(a: T, b: T, c: T) -> T;
+
+    crate fn simd_gather<T, P, M>(value: T, pointers: P, mask: M) -> T;
+    crate fn simd_scatter<T, P, M>(value: T, pointers: P, mask: M);
+
+    crate fn simd_bitmask<T, U>(value: T) -> U;
+}
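+
+// Semantics note (illustrative): `simd_select(m, a, b)` picks each lane from
+// `a` where the corresponding mask lane is set and from `b` otherwise, e.g.
+// for m = [true, false], a = [1, 2], b = [8, 9] the result is [1, 9].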
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/math.rs.html b/src/packed_simd/codegen/math.rs.html new file mode 100644 index 000000000..cc887ed11 --- /dev/null +++ b/src/packed_simd/codegen/math.rs.html @@ -0,0 +1,9 @@ +math.rs.html -- source
+//! Vertical math operations
+
+crate mod float;
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/math/float.rs.html b/src/packed_simd/codegen/math/float.rs.html new file mode 100644 index 000000000..f3c17bb43 --- /dev/null +++ b/src/packed_simd/codegen/math/float.rs.html @@ -0,0 +1,41 @@ +float.rs.html -- source
+//! Vertical floating-point math operations.
+#![allow(clippy::useless_transmute)]
+
+#[macro_use]
+crate mod macros;
+crate mod abs;
+crate mod cos;
+crate mod cos_pi;
+crate mod exp;
+crate mod ln;
+crate mod mul_add;
+crate mod mul_adde;
+crate mod powf;
+crate mod sin;
+crate mod sin_cos_pi;
+crate mod sin_pi;
+crate mod sqrt;
+crate mod sqrte;
+crate mod tanh;
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/math/float/abs.rs.html b/src/packed_simd/codegen/math/float/abs.rs.html new file mode 100644 index 000000000..d6f8a81ef --- /dev/null +++ b/src/packed_simd/codegen/math/float/abs.rs.html @@ -0,0 +1,209 @@ +abs.rs.html -- source
+//! Vertical floating-point `fabs`
+#![allow(unused)]
+
+// FIXME 64-bit 1 elem vectors fabs
+
+use crate::*;
+
+crate trait Abs {
+    fn abs(self) -> Self;
+}
+
+#[allow(improper_ctypes)]
+extern "C" {
+    #[link_name = "llvm.fabs.v2f32"]
+    fn fabs_v2f32(x: f32x2) -> f32x2;
+    #[link_name = "llvm.fabs.v4f32"]
+    fn fabs_v4f32(x: f32x4) -> f32x4;
+    #[link_name = "llvm.fabs.v8f32"]
+    fn fabs_v8f32(x: f32x8) -> f32x8;
+    #[link_name = "llvm.fabs.v16f32"]
+    fn fabs_v16f32(x: f32x16) -> f32x16;
+    /* FIXME 64-bit single elem vectors
+    #[link_name = "llvm.fabs.v1f64"]
+    fn fabs_v1f64(x: f64x1) -> f64x1;
+     */
+    #[link_name = "llvm.fabs.v2f64"]
+    fn fabs_v2f64(x: f64x2) -> f64x2;
+    #[link_name = "llvm.fabs.v4f64"]
+    fn fabs_v4f64(x: f64x4) -> f64x4;
+    #[link_name = "llvm.fabs.v8f64"]
+    fn fabs_v8f64(x: f64x8) -> f64x8;
+
+    #[link_name = "llvm.fabs.f32"]
+    fn fabs_f32(x: f32) -> f32;
+    #[link_name = "llvm.fabs.f64"]
+    fn fabs_f64(x: f64) -> f64;
+}
+
+gen_unary_impl_table!(Abs, abs);
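+
+// Dispatch notation used below (see the arms of `impl_unary_!` in
+// codegen/math/float/macros.rs): `[t => f32x4]` pads the vector and calls the
+// twice-as-wide function once, `[h => f32x8]` calls the function on each
+// half, `[q => f32x4]` on each quarter, and `[f32; 2]` falls back to a
+// scalar loop.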
+
+cfg_if! {
+    if #[cfg(target_arch = "s390x")] {
+        // FIXME: https://github.com/rust-lang-nursery/packed_simd/issues/14
+        impl_unary!(f32x2[f32; 2]: fabs_f32);
+        impl_unary!(f32x4[f32; 4]: fabs_f32);
+        impl_unary!(f32x8[f32; 8]: fabs_f32);
+        impl_unary!(f32x16[f32; 16]: fabs_f32);
+
+        impl_unary!(f64x2[f64; 2]: fabs_f64);
+        impl_unary!(f64x4[f64; 4]: fabs_f64);
+        impl_unary!(f64x8[f64; 8]: fabs_f64);
+    } else if #[cfg(all(target_arch = "x86_64", feature = "sleef-sys"))] {
+        use sleef_sys::*;
+        cfg_if! {
+            if #[cfg(target_feature = "avx2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_fabsf4_avx2128);
+                impl_unary!(f32x16[h => f32x8]: Sleef_fabsf8_avx2);
+                impl_unary!(f64x8[h => f64x4]: Sleef_fabsd4_avx2);
+
+                impl_unary!(f32x4: Sleef_fabsf4_avx2128);
+                impl_unary!(f32x8: Sleef_fabsf8_avx2);
+                impl_unary!(f64x2: Sleef_fabsd2_avx2128);
+                impl_unary!(f64x4: Sleef_fabsd4_avx2);
+            } else if #[cfg(target_feature = "avx")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_fabsf4_sse4);
+                impl_unary!(f32x16[h => f32x8]: Sleef_fabsf8_avx);
+                impl_unary!(f64x8[h => f64x4]: Sleef_fabsd4_avx);
+
+                impl_unary!(f32x4: Sleef_fabsf4_sse4);
+                impl_unary!(f32x8: Sleef_fabsf8_avx);
+                impl_unary!(f64x2: Sleef_fabsd2_sse4);
+                impl_unary!(f64x4: Sleef_fabsd4_avx);
+            } else if #[cfg(target_feature = "sse4.2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_fabsf4_sse4);
+                impl_unary!(f32x16[q => f32x4]: Sleef_fabsf4_sse4);
+                impl_unary!(f64x8[q => f64x2]: Sleef_fabsd2_sse4);
+
+                impl_unary!(f32x4: Sleef_fabsf4_sse4);
+                impl_unary!(f32x8[h => f32x4]: Sleef_fabsf4_sse4);
+                impl_unary!(f64x2: Sleef_fabsd2_sse4);
+                impl_unary!(f64x4[h => f64x2]: Sleef_fabsd2_sse4);
+            } else {
+                impl_unary!(f32x2[f32; 2]: fabs_f32);
+                impl_unary!(f32x16: fabs_v16f32);
+                impl_unary!(f64x8: fabs_v8f64);
+
+                impl_unary!(f32x4: fabs_v4f32);
+                impl_unary!(f32x8: fabs_v8f32);
+                impl_unary!(f64x2: fabs_v2f64);
+                impl_unary!(f64x4: fabs_v4f64);
+            }
+        }
+    } else {
+        impl_unary!(f32x2[f32; 2]: fabs_f32);
+        impl_unary!(f32x4: fabs_v4f32);
+        impl_unary!(f32x8: fabs_v8f32);
+        impl_unary!(f32x16: fabs_v16f32);
+
+        impl_unary!(f64x2: fabs_v2f64);
+        impl_unary!(f64x4: fabs_v4f64);
+        impl_unary!(f64x8: fabs_v8f64);
+    }
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/math/float/cos.rs.html b/src/packed_simd/codegen/math/float/cos.rs.html new file mode 100644 index 000000000..84e528626 --- /dev/null +++ b/src/packed_simd/codegen/math/float/cos.rs.html @@ -0,0 +1,209 @@ +cos.rs.html -- source
+//! Vertical floating-point `cos`
+#![allow(unused)]
+
+// FIXME 64-bit 1 elem vector cos
+
+use crate::*;
+
+crate trait Cos {
+    fn cos(self) -> Self;
+}
+
+#[allow(improper_ctypes)]
+extern "C" {
+    #[link_name = "llvm.cos.v2f32"]
+    fn cos_v2f32(x: f32x2) -> f32x2;
+    #[link_name = "llvm.cos.v4f32"]
+    fn cos_v4f32(x: f32x4) -> f32x4;
+    #[link_name = "llvm.cos.v8f32"]
+    fn cos_v8f32(x: f32x8) -> f32x8;
+    #[link_name = "llvm.cos.v16f32"]
+    fn cos_v16f32(x: f32x16) -> f32x16;
+    /* FIXME 64-bit single elem vectors
+    #[link_name = "llvm.cos.v1f64"]
+    fn cos_v1f64(x: f64x1) -> f64x1;
+     */
+    #[link_name = "llvm.cos.v2f64"]
+    fn cos_v2f64(x: f64x2) -> f64x2;
+    #[link_name = "llvm.cos.v4f64"]
+    fn cos_v4f64(x: f64x4) -> f64x4;
+    #[link_name = "llvm.cos.v8f64"]
+    fn cos_v8f64(x: f64x8) -> f64x8;
+
+    #[link_name = "llvm.cos.f32"]
+    fn cos_f32(x: f32) -> f32;
+    #[link_name = "llvm.cos.f64"]
+    fn cos_f64(x: f64) -> f64;
+}
+
+gen_unary_impl_table!(Cos, cos);
+
+cfg_if! {
+    if #[cfg(target_arch = "s390x")] {
+        // FIXME: https://github.com/rust-lang-nursery/packed_simd/issues/14
+        impl_unary!(f32x2[f32; 2]: cos_f32);
+        impl_unary!(f32x4[f32; 4]: cos_f32);
+        impl_unary!(f32x8[f32; 8]: cos_f32);
+        impl_unary!(f32x16[f32; 16]: cos_f32);
+
+        impl_unary!(f64x2[f64; 2]: cos_f64);
+        impl_unary!(f64x4[f64; 4]: cos_f64);
+        impl_unary!(f64x8[f64; 8]: cos_f64);
+    } else if #[cfg(all(target_arch = "x86_64", feature = "sleef-sys"))] {
+        use sleef_sys::*;
+        cfg_if! {
+            if #[cfg(target_feature = "avx2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_cosf4_u10avx2128);
+                impl_unary!(f32x16[h => f32x8]: Sleef_cosf8_u10avx2);
+                impl_unary!(f64x8[h => f64x4]: Sleef_cosd4_u10avx2);
+
+                impl_unary!(f32x4: Sleef_cosf4_u10avx2128);
+                impl_unary!(f32x8: Sleef_cosf8_u10avx2);
+                impl_unary!(f64x2: Sleef_cosd2_u10avx2128);
+                impl_unary!(f64x4: Sleef_cosd4_u10avx2);
+            } else if #[cfg(target_feature = "avx")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_cosf4_u10sse4);
+                impl_unary!(f32x16[h => f32x8]: Sleef_cosf8_u10avx);
+                impl_unary!(f64x8[h => f64x4]: Sleef_cosd4_u10avx);
+
+                impl_unary!(f32x4: Sleef_cosf4_u10sse4);
+                impl_unary!(f32x8: Sleef_cosf8_u10avx);
+                impl_unary!(f64x2: Sleef_cosd2_u10sse4);
+                impl_unary!(f64x4: Sleef_cosd4_u10avx);
+            } else if #[cfg(target_feature = "sse4.2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_cosf4_u10sse4);
+                impl_unary!(f32x16[q => f32x4]: Sleef_cosf4_u10sse4);
+                impl_unary!(f64x8[q => f64x2]: Sleef_cosd2_u10sse4);
+
+                impl_unary!(f32x4: Sleef_cosf4_u10sse4);
+                impl_unary!(f32x8[h => f32x4]: Sleef_cosf4_u10sse4);
+                impl_unary!(f64x2: Sleef_cosd2_u10sse4);
+                impl_unary!(f64x4[h => f64x2]: Sleef_cosd2_u10sse4);
+            } else {
+                impl_unary!(f32x2[f32; 2]: cos_f32);
+                impl_unary!(f32x16: cos_v16f32);
+                impl_unary!(f64x8: cos_v8f64);
+
+                impl_unary!(f32x4: cos_v4f32);
+                impl_unary!(f32x8: cos_v8f32);
+                impl_unary!(f64x2: cos_v2f64);
+                impl_unary!(f64x4: cos_v4f64);
+            }
+        }
+    } else {
+        impl_unary!(f32x2[f32; 2]: cos_f32);
+        impl_unary!(f32x4: cos_v4f32);
+        impl_unary!(f32x8: cos_v8f32);
+        impl_unary!(f32x16: cos_v16f32);
+
+        impl_unary!(f64x2: cos_v2f64);
+        impl_unary!(f64x4: cos_v4f64);
+        impl_unary!(f64x8: cos_v8f64);
+    }
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/math/float/cos_pi.rs.html b/src/packed_simd/codegen/math/float/cos_pi.rs.html new file mode 100644 index 000000000..d756094ea --- /dev/null +++ b/src/packed_simd/codegen/math/float/cos_pi.rs.html @@ -0,0 +1,177 @@ +cos_pi.rs.html -- source
+//! Vertical floating-point `cos_pi`
+#![allow(unused)]
+
+// FIXME 64-bit 1 elem vectors cos_pi
+
+use crate::*;
+
+crate trait CosPi {
+    fn cos_pi(self) -> Self;
+}
+
+gen_unary_impl_table!(CosPi, cos_pi);
+
+macro_rules! impl_def {
+    ($vid:ident, $PI:path) => {
+        impl CosPi for $vid {
+            #[inline]
+            fn cos_pi(self) -> Self {
+                (self * Self::splat($PI)).cos()
+            }
+        }
+    };
+}
+macro_rules! impl_def32 {
+    ($vid:ident) => {
+        impl_def!($vid, crate::f32::consts::PI);
+    };
+}
+macro_rules! impl_def64 {
+    ($vid:ident) => {
+        impl_def!($vid, crate::f64::consts::PI);
+    };
+}
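+
+// Worked example of the fallback definition (illustrative): `cos_pi(x)`
+// computes `cos(x * PI)`, so half-integer inputs land on the zeros of cosine:
+//
+//     let x = f32x4::new(0.0, 0.5, 1.0, 1.5);
+//     // x.cos_pi() ≈ [1.0, 0.0, -1.0, 0.0]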
+
+cfg_if! {
+    if #[cfg(all(target_arch = "x86_64", feature = "sleef-sys"))] {
+        use sleef_sys::*;
+        cfg_if! {
+            if #[cfg(target_feature = "avx2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_cospif4_u05avx2128);
+                impl_unary!(f32x16[h => f32x8]: Sleef_cospif8_u05avx2);
+                impl_unary!(f64x8[h => f64x4]: Sleef_cospid4_u05avx2);
+
+                impl_unary!(f32x4: Sleef_cospif4_u05avx2128);
+                impl_unary!(f32x8: Sleef_cospif8_u05avx2);
+                impl_unary!(f64x2: Sleef_cospid2_u05avx2128);
+                impl_unary!(f64x4: Sleef_cospid4_u05avx2);
+            } else if #[cfg(target_feature = "avx")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_cospif4_u05sse4);
+                impl_unary!(f32x16[h => f32x8]: Sleef_cospif8_u05avx);
+                impl_unary!(f64x8[h => f64x4]: Sleef_cospid4_u05avx);
+
+                impl_unary!(f32x4: Sleef_cospif4_u05sse4);
+                impl_unary!(f32x8: Sleef_cospif8_u05avx);
+                impl_unary!(f64x2: Sleef_cospid2_u05sse4);
+                impl_unary!(f64x4: Sleef_cospid4_u05avx);
+            } else if #[cfg(target_feature = "sse4.2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_cospif4_u05sse4);
+                impl_unary!(f32x16[q => f32x4]: Sleef_cospif4_u05sse4);
+                impl_unary!(f64x8[q => f64x2]: Sleef_cospid2_u05sse4);
+
+                impl_unary!(f32x4: Sleef_cospif4_u05sse4);
+                impl_unary!(f32x8[h => f32x4]: Sleef_cospif4_u05sse4);
+                impl_unary!(f64x2: Sleef_cospid2_u05sse4);
+                impl_unary!(f64x4[h => f64x2]: Sleef_cospid2_u05sse4);
+            } else {
+                impl_def32!(f32x2);
+                impl_def32!(f32x4);
+                impl_def32!(f32x8);
+                impl_def32!(f32x16);
+
+                impl_def64!(f64x2);
+                impl_def64!(f64x4);
+                impl_def64!(f64x8);
+            }
+        }
+    } else {
+        impl_def32!(f32x2);
+        impl_def32!(f32x4);
+        impl_def32!(f32x8);
+        impl_def32!(f32x16);
+
+        impl_def64!(f64x2);
+        impl_def64!(f64x4);
+        impl_def64!(f64x8);
+    }
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/math/float/exp.rs.html b/src/packed_simd/codegen/math/float/exp.rs.html new file mode 100644 index 000000000..9e3d27457 --- /dev/null +++ b/src/packed_simd/codegen/math/float/exp.rs.html @@ -0,0 +1,227 @@ +exp.rs.html -- source
+//! Vertical floating-point `exp`
+#![allow(unused)]
+
+// FIXME 64-bit single elem vectors missing
+
+use crate::*;
+
+crate trait Exp {
+    fn exp(self) -> Self;
+}
+
+#[allow(improper_ctypes)]
+extern "C" {
+    #[link_name = "llvm.exp.v2f32"]
+    fn exp_v2f32(x: f32x2) -> f32x2;
+    #[link_name = "llvm.exp.v4f32"]
+    fn exp_v4f32(x: f32x4) -> f32x4;
+    #[link_name = "llvm.exp.v8f32"]
+    fn exp_v8f32(x: f32x8) -> f32x8;
+    #[link_name = "llvm.exp.v16f32"]
+    fn exp_v16f32(x: f32x16) -> f32x16;
+    /* FIXME 64-bit single elem vectors
+    #[link_name = "llvm.exp.v1f64"]
+    fn exp_v1f64(x: f64x1) -> f64x1;
+     */
+    #[link_name = "llvm.exp.v2f64"]
+    fn exp_v2f64(x: f64x2) -> f64x2;
+    #[link_name = "llvm.exp.v4f64"]
+    fn exp_v4f64(x: f64x4) -> f64x4;
+    #[link_name = "llvm.exp.v8f64"]
+    fn exp_v8f64(x: f64x8) -> f64x8;
+
+    #[link_name = "llvm.exp.f32"]
+    fn exp_f32(x: f32) -> f32;
+    #[link_name = "llvm.exp.f64"]
+    fn exp_f64(x: f64) -> f64;
+}
+
+gen_unary_impl_table!(Exp, exp);
+
+cfg_if! {
+    if #[cfg(target_arch = "s390x")] {
+        // FIXME: https://github.com/rust-lang-nursery/packed_simd/issues/14
+        impl_unary!(f32x2[f32; 2]: exp_f32);
+        impl_unary!(f32x4[f32; 4]: exp_f32);
+        impl_unary!(f32x8[f32; 8]: exp_f32);
+        impl_unary!(f32x16[f32; 16]: exp_f32);
+
+        impl_unary!(f64x2[f64; 2]: exp_f64);
+        impl_unary!(f64x4[f64; 4]: exp_f64);
+        impl_unary!(f64x8[f64; 8]: exp_f64);
+    } else if #[cfg(all(target_arch = "x86_64", feature = "sleef-sys"))] {
+        use sleef_sys::*;
+        cfg_if! {
+            if #[cfg(target_feature = "avx2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_expf4_u10avx2128);
+                impl_unary!(f32x16[h => f32x8]: Sleef_expf8_u10avx2);
+                impl_unary!(f64x8[h => f64x4]: Sleef_expd4_u10avx2);
+
+                impl_unary!(f32x4: Sleef_expf4_u10avx2128);
+                impl_unary!(f32x8: Sleef_expf8_u10avx2);
+                impl_unary!(f64x2: Sleef_expd2_u10avx2128);
+                impl_unary!(f64x4: Sleef_expd4_u10avx2);
+            } else if #[cfg(target_feature = "avx")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_expf4_u10sse4);
+                impl_unary!(f32x16[h => f32x8]: Sleef_expf8_u10avx);
+                impl_unary!(f64x8[h => f64x4]: Sleef_expd4_u10avx);
+
+                impl_unary!(f32x4: Sleef_expf4_u10sse4);
+                impl_unary!(f32x8: Sleef_expf8_u10avx);
+                impl_unary!(f64x2: Sleef_expd2_u10sse4);
+                impl_unary!(f64x4: Sleef_expd4_u10avx);
+            } else if #[cfg(target_feature = "sse4.2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_expf4_u10sse4);
+                impl_unary!(f32x16[q => f32x4]: Sleef_expf4_u10sse4);
+                impl_unary!(f64x8[q => f64x2]: Sleef_expd2_u10sse4);
+
+                impl_unary!(f32x4: Sleef_expf4_u10sse4);
+                impl_unary!(f32x8[h => f32x4]: Sleef_expf4_u10sse4);
+                impl_unary!(f64x2: Sleef_expd2_u10sse4);
+                impl_unary!(f64x4[h => f64x2]: Sleef_expd2_u10sse4);
+            } else if #[cfg(target_feature = "sse2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_expf4_u10sse2);
+                impl_unary!(f32x16[q => f32x4]: Sleef_expf4_u10sse2);
+                impl_unary!(f64x8[q => f64x2]: Sleef_expd2_u10sse2);
+
+                impl_unary!(f32x4: Sleef_expf4_u10sse2);
+                impl_unary!(f32x8[h => f32x4]: Sleef_expf4_u10sse2);
+                impl_unary!(f64x2: Sleef_expd2_u10sse2);
+                impl_unary!(f64x4[h => f64x2]: Sleef_expd2_u10sse2);
+            } else {
+                impl_unary!(f32x2[f32; 2]: exp_f32);
+                impl_unary!(f32x16: exp_v16f32);
+                impl_unary!(f64x8: exp_v8f64);
+
+                impl_unary!(f32x4: exp_v4f32);
+                impl_unary!(f32x8: exp_v8f32);
+                impl_unary!(f64x2: exp_v2f64);
+                impl_unary!(f64x4: exp_v4f64);
+            }
+        }
+    } else {
+        impl_unary!(f32x2[f32; 2]: exp_f32);
+        impl_unary!(f32x4: exp_v4f32);
+        impl_unary!(f32x8: exp_v8f32);
+        impl_unary!(f32x16: exp_v16f32);
+
+        impl_unary!(f64x2: exp_v2f64);
+        impl_unary!(f64x4: exp_v4f64);
+        impl_unary!(f64x8: exp_v8f64);
+    }
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/math/float/ln.rs.html b/src/packed_simd/codegen/math/float/ln.rs.html new file mode 100644 index 000000000..d307a8cf3 --- /dev/null +++ b/src/packed_simd/codegen/math/float/ln.rs.html @@ -0,0 +1,227 @@ +ln.rs.html -- source
+//! Vertical floating-point `ln`
+#![allow(unused)]
+
+// FIXME 64-bit single elem vectors missing
+
+use crate::*;
+
+crate trait Ln {
+    fn ln(self) -> Self;
+}
+
+#[allow(improper_ctypes)]
+extern "C" {
+    #[link_name = "llvm.log.v2f32"]
+    fn ln_v2f32(x: f32x2) -> f32x2;
+    #[link_name = "llvm.log.v4f32"]
+    fn ln_v4f32(x: f32x4) -> f32x4;
+    #[link_name = "llvm.log.v8f32"]
+    fn ln_v8f32(x: f32x8) -> f32x8;
+    #[link_name = "llvm.log.v16f32"]
+    fn ln_v16f32(x: f32x16) -> f32x16;
+    /* FIXME 64-bit single elem vectors
+    #[link_name = "llvm.log.v1f64"]
+    fn ln_v1f64(x: f64x1) -> f64x1;
+     */
+    #[link_name = "llvm.log.v2f64"]
+    fn ln_v2f64(x: f64x2) -> f64x2;
+    #[link_name = "llvm.log.v4f64"]
+    fn ln_v4f64(x: f64x4) -> f64x4;
+    #[link_name = "llvm.log.v8f64"]
+    fn ln_v8f64(x: f64x8) -> f64x8;
+
+    #[link_name = "llvm.log.f32"]
+    fn ln_f32(x: f32) -> f32;
+    #[link_name = "llvm.log.f64"]
+    fn ln_f64(x: f64) -> f64;
+}
+
+gen_unary_impl_table!(Ln, ln);
+
+cfg_if! {
+    if #[cfg(target_arch = "s390x")] {
+        // FIXME: https://github.com/rust-lang-nursery/packed_simd/issues/14
+        impl_unary!(f32x2[f32; 2]: ln_f32);
+        impl_unary!(f32x4[f32; 4]: ln_f32);
+        impl_unary!(f32x8[f32; 8]: ln_f32);
+        impl_unary!(f32x16[f32; 16]: ln_f32);
+
+        impl_unary!(f64x2[f64; 2]: ln_f64);
+        impl_unary!(f64x4[f64; 4]: ln_f64);
+        impl_unary!(f64x8[f64; 8]: ln_f64);
+    } else if #[cfg(all(target_arch = "x86_64", feature = "sleef-sys"))] {
+        use sleef_sys::*;
+        cfg_if! {
+            if #[cfg(target_feature = "avx2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_logf4_u10avx2128);
+                impl_unary!(f32x16[h => f32x8]: Sleef_logf8_u10avx2);
+                impl_unary!(f64x8[h => f64x4]: Sleef_logd4_u10avx2);
+
+                impl_unary!(f32x4: Sleef_logf4_u10avx2128);
+                impl_unary!(f32x8: Sleef_logf8_u10avx2);
+                impl_unary!(f64x2: Sleef_logd2_u10avx2128);
+                impl_unary!(f64x4: Sleef_logd4_u10avx2);
+            } else if #[cfg(target_feature = "avx")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_logf4_u10sse4);
+                impl_unary!(f32x16[h => f32x8]: Sleef_logf8_u10avx);
+                impl_unary!(f64x8[h => f64x4]: Sleef_logd4_u10avx);
+
+                impl_unary!(f32x4: Sleef_logf4_u10sse4);
+                impl_unary!(f32x8: Sleef_logf8_u10avx);
+                impl_unary!(f64x2: Sleef_logd2_u10sse4);
+                impl_unary!(f64x4: Sleef_logd4_u10avx);
+            } else if #[cfg(target_feature = "sse4.2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_logf4_u10sse4);
+                impl_unary!(f32x16[q => f32x4]: Sleef_logf4_u10sse4);
+                impl_unary!(f64x8[q => f64x2]: Sleef_logd2_u10sse4);
+
+                impl_unary!(f32x4: Sleef_logf4_u10sse4);
+                impl_unary!(f32x8[h => f32x4]: Sleef_logf4_u10sse4);
+                impl_unary!(f64x2: Sleef_logd2_u10sse4);
+                impl_unary!(f64x4[h => f64x2]: Sleef_logd2_u10sse4);
+            } else if #[cfg(target_feature = "sse2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_logf4_u10sse2);
+                impl_unary!(f32x16[q => f32x4]: Sleef_logf4_u10sse2);
+                impl_unary!(f64x8[q => f64x2]: Sleef_logd2_u10sse2);
+
+                impl_unary!(f32x4: Sleef_logf4_u10sse2);
+                impl_unary!(f32x8[h => f32x4]: Sleef_logf4_u10sse2);
+                impl_unary!(f64x2: Sleef_logd2_u10sse2);
+                impl_unary!(f64x4[h => f64x2]: Sleef_logd2_u10sse2);
+            } else {
+                impl_unary!(f32x2[f32; 2]: ln_f32);
+                impl_unary!(f32x16: ln_v16f32);
+                impl_unary!(f64x8: ln_v8f64);
+
+                impl_unary!(f32x4: ln_v4f32);
+                impl_unary!(f32x8: ln_v8f32);
+                impl_unary!(f64x2: ln_v2f64);
+                impl_unary!(f64x4: ln_v4f64);
+            }
+        }
+    } else {
+        impl_unary!(f32x2[f32; 2]: ln_f32);
+        impl_unary!(f32x4: ln_v4f32);
+        impl_unary!(f32x8: ln_v8f32);
+        impl_unary!(f32x16: ln_v16f32);
+
+        impl_unary!(f64x2: ln_v2f64);
+        impl_unary!(f64x4: ln_v4f64);
+        impl_unary!(f64x8: ln_v8f64);
+    }
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/math/float/macros.rs.html b/src/packed_simd/codegen/math/float/macros.rs.html new file mode 100644 index 000000000..ff18ccd47 --- /dev/null +++ b/src/packed_simd/codegen/math/float/macros.rs.html @@ -0,0 +1,1121 @@ +macros.rs.html -- source
+//! Utility macros
+#![allow(unused)]
+
+macro_rules! impl_unary_ {
+    // implementation mapping 1:1
+    (vec | $trait_id:ident, $trait_method:ident, $vec_id:ident,
+     $fun:ident) => {
+        impl $trait_id for $vec_id {
+            #[inline]
+            fn $trait_method(self) -> Self {
+                unsafe {
+                    use crate::mem::transmute;
+                    transmute($fun(transmute(self)))
+                }
+            }
+        }
+    };
+    // implementation mapping 1:1 for when `$fun` is a generic function
+    // like some of the fp math rustc intrinsics (e.g. `fn fun<T>(x: T) -> T`).
+    (gen | $trait_id:ident, $trait_method:ident, $vec_id:ident,
+     $fun:ident) => {
+        impl $trait_id for $vec_id {
+            #[inline]
+            fn $trait_method(self) -> Self {
+                unsafe {
+                    use crate::mem::transmute;
+                    transmute($fun(self.0))
+                }
+            }
+        }
+    };
+    (scalar | $trait_id:ident, $trait_method:ident,
+     $vec_id:ident, [$sid:ident; $scount:expr], $fun:ident) => {
+        impl $trait_id for $vec_id {
+            #[inline]
+            fn $trait_method(self) -> Self {
+                unsafe {
+                    union U {
+                        vec: $vec_id,
+                        scalars: [$sid; $scount],
+                    }
+                    let mut scalars = U { vec: self }.scalars;
+                    for i in &mut scalars {
+                        *i = $fun(*i);
+                    }
+                    U { scalars }.vec
+                }
+            }
+        }
+    };
+    // implementation calling fun twice on each of the vector halves:
+    (halves | $trait_id:ident, $trait_method:ident, $vec_id:ident,
+     $vech_id:ident, $fun:ident) => {
+        impl $trait_id for $vec_id {
+            #[inline]
+            fn $trait_method(self) -> Self {
+                unsafe {
+                    use crate::mem::transmute;
+                    union U {
+                        vec: $vec_id,
+                        halves: [$vech_id; 2],
+                    }
+
+                    let mut halves = U { vec: self }.halves;
+
+                    *halves.get_unchecked_mut(0) =
+                        transmute($fun(transmute(*halves.get_unchecked(0))));
+                    *halves.get_unchecked_mut(1) =
+                        transmute($fun(transmute(*halves.get_unchecked(1))));
+
+                    U { halves }.vec
+                }
+            }
+        }
+    };
+    // implementation calling fun four times on each of the vector quarters:
+    (quarter | $trait_id:ident, $trait_method:ident, $vec_id:ident,
+     $vecq_id:ident, $fun:ident) => {
+        impl $trait_id for $vec_id {
+            #[inline]
+            fn $trait_method(self) -> Self {
+                unsafe {
+                    use crate::mem::transmute;
+                    union U {
+                        vec: $vec_id,
+                        quarters: [$vecq_id; 4],
+                    }
+
+                    let mut quarters = U { vec: self }.quarters;
+
+                    *quarters.get_unchecked_mut(0) =
+                        transmute($fun(transmute(*quarters.get_unchecked(0))));
+                    *quarters.get_unchecked_mut(1) =
+                        transmute($fun(transmute(*quarters.get_unchecked(1))));
+                    *quarters.get_unchecked_mut(2) =
+                        transmute($fun(transmute(*quarters.get_unchecked(2))));
+                    *quarters.get_unchecked_mut(3) =
+                        transmute($fun(transmute(*quarters.get_unchecked(3))));
+
+                    U { quarters }.vec
+                }
+            }
+        }
+    };
+    // implementation calling fun once on a vector twice as large:
+    (twice | $trait_id:ident, $trait_method:ident, $vec_id:ident,
+     $vect_id:ident, $fun:ident) => {
+        impl $trait_id for $vec_id {
+            #[inline]
+            fn $trait_method(self) -> Self {
+                unsafe {
+                    use crate::mem::{transmute, uninitialized};
+
+                    union U {
+                        vec: [$vec_id; 2],
+                        twice: $vect_id,
+                    }
+
+                    let twice = U { vec: [self, uninitialized()] }.twice;
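+                    // Only the lower half of `twice` is initialized; the
+                    // upper half is scratch and is discarded below.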
+                    let twice = transmute($fun(transmute(twice)));
+
+                    *(U { twice }.vec.get_unchecked(0))
+                }
+            }
+        }
+    };
+}
+
+macro_rules! gen_unary_impl_table {
+    ($trait_id:ident, $trait_method:ident) => {
+        macro_rules! impl_unary {
+            ($vid:ident: $fun:ident) => {
+                impl_unary_!(vec | $trait_id, $trait_method, $vid, $fun);
+            };
+            ($vid:ident[g]: $fun:ident) => {
+                impl_unary_!(gen | $trait_id, $trait_method, $vid, $fun);
+            };
+            ($vid:ident[$sid:ident; $sc:expr]: $fun:ident) => {
+                impl_unary_!(
+                    scalar | $trait_id,
+                    $trait_method,
+                    $vid,
+                    [$sid; $sc],
+                    $fun
+                );
+            };
+            ($vid:ident[s]: $fun:ident) => {
+                impl_unary_!(scalar | $trait_id, $trait_method, $vid, $fun);
+            };
+            ($vid:ident[h => $vid_h:ident]: $fun:ident) => {
+                impl_unary_!(
+                    halves | $trait_id,
+                    $trait_method,
+                    $vid,
+                    $vid_h,
+                    $fun
+                );
+            };
+            ($vid:ident[q => $vid_q:ident]: $fun:ident) => {
+                impl_unary_!(
+                    quarter | $trait_id,
+                    $trait_method,
+                    $vid,
+                    $vid_q,
+                    $fun
+                );
+            };
+            ($vid:ident[t => $vid_t:ident]: $fun:ident) => {
+                impl_unary_!(
+                    twice | $trait_id,
+                    $trait_method,
+                    $vid,
+                    $vid_t,
+                    $fun
+                );
+            };
+        }
+    };
+}
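+
+// Illustrative expansion (a sketch, not part of the original file): after
+// `gen_unary_impl_table!(Cos, cos)`, the invocation
+// `impl_unary!(f32x4: cos_v4f32)` expands to roughly:
+//
+//     impl Cos for f32x4 {
+//         #[inline]
+//         fn cos(self) -> Self {
+//             unsafe { mem::transmute(cos_v4f32(mem::transmute(self))) }
+//         }
+//     }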
+
+macro_rules! impl_tertiary_ {
+    // implementation mapping 1:1
+    (vec | $trait_id:ident, $trait_method:ident, $vec_id:ident,
+     $fun:ident) => {
+        impl $trait_id for $vec_id {
+            #[inline]
+            fn $trait_method(self, y: Self, z: Self) -> Self {
+                unsafe {
+                    use crate::mem::transmute;
+                    transmute($fun(
+                        transmute(self),
+                        transmute(y),
+                        transmute(z),
+                    ))
+                }
+            }
+        }
+    };
+    (scalar | $trait_id:ident, $trait_method:ident,
+     $vec_id:ident, [$sid:ident; $scount:expr], $fun:ident) => {
+        impl $trait_id for $vec_id {
+            #[inline]
+            fn $trait_method(self, y: Self, z: Self) -> Self {
+                unsafe {
+                    union U {
+                        vec: $vec_id,
+                        scalars: [$sid; $scount],
+                    }
+                    let mut x = U { vec: self }.scalars;
+                    let y = U { vec: y }.scalars;
+                    let z = U { vec: z }.scalars;
+                    for (x, (y, z)) in
+                        x.iter_mut().zip(y.iter().zip(z.iter()))
+                    {
+                        *x = $fun(*x, *y, *z);
+                    }
+                    U { scalars: x }.vec
+                }
+            }
+        }
+    };
+    // implementation calling fun twice on each of the vector halves:
+    (halves | $trait_id:ident, $trait_method:ident, $vec_id:ident,
+     $vech_id:ident, $fun:ident) => {
+        impl $trait_id for $vec_id {
+            #[inline]
+            fn $trait_method(self, y: Self, z: Self) -> Self {
+                unsafe {
+                    use crate::mem::transmute;
+                    union U {
+                        vec: $vec_id,
+                        halves: [$vech_id; 2],
+                    }
+
+                    let mut x_halves = U { vec: self }.halves;
+                    let y_halves = U { vec: y }.halves;
+                    let z_halves = U { vec: z }.halves;
+
+                    *x_halves.get_unchecked_mut(0) = transmute($fun(
+                        transmute(*x_halves.get_unchecked(0)),
+                        transmute(*y_halves.get_unchecked(0)),
+                        transmute(*z_halves.get_unchecked(0)),
+                    ));
+                    *x_halves.get_unchecked_mut(1) = transmute($fun(
+                        transmute(*x_halves.get_unchecked(1)),
+                        transmute(*y_halves.get_unchecked(1)),
+                        transmute(*z_halves.get_unchecked(1)),
+                    ));
+
+                    U { halves: x_halves }.vec
+                }
+            }
+        }
+    };
+    // implementation calling fun four times on each of the vector quarters:
+    (quarter | $trait_id:ident, $trait_method:ident, $vec_id:ident,
+     $vecq_id:ident, $fun:ident) => {
+        impl $trait_id for $vec_id {
+            #[inline]
+            fn $trait_method(self, y: Self, z: Self) -> Self {
+                unsafe {
+                    use crate::mem::transmute;
+                    union U {
+                        vec: $vec_id,
+                        quarters: [$vecq_id; 4],
+                    }
+
+                    let mut x_quarters = U { vec: self }.quarters;
+                    let y_quarters = U { vec: y }.quarters;
+                    let z_quarters = U { vec: z }.quarters;
+
+                    *x_quarters.get_unchecked_mut(0) = transmute($fun(
+                        transmute(*x_quarters.get_unchecked(0)),
+                        transmute(*y_quarters.get_unchecked(0)),
+                        transmute(*z_quarters.get_unchecked(0)),
+                    ));
+
+                    *x_quarters.get_unchecked_mut(1) = transmute($fun(
+                        transmute(*x_quarters.get_unchecked(1)),
+                        transmute(*y_quarters.get_unchecked(1)),
+                        transmute(*z_quarters.get_unchecked(1)),
+                    ));
+
+                    *x_quarters.get_unchecked_mut(2) = transmute($fun(
+                        transmute(*x_quarters.get_unchecked(2)),
+                        transmute(*y_quarters.get_unchecked(2)),
+                        transmute(*z_quarters.get_unchecked(2)),
+                    ));
+
+                    *x_quarters.get_unchecked_mut(3) = transmute($fun(
+                        transmute(*x_quarters.get_unchecked(3)),
+                        transmute(*y_quarters.get_unchecked(3)),
+                        transmute(*z_quarters.get_unchecked(3)),
+                    ));
+
+                    U { quarters: x_quarters }.vec
+                }
+            }
+        }
+    };
+    // implementation calling fun once on a vector twice as large:
+    (twice | $trait_id:ident, $trait_method:ident, $vec_id:ident,
+     $vect_id:ident, $fun:ident) => {
+        impl $trait_id for $vec_id {
+            #[inline]
+            fn $trait_method(self, y: Self, z: Self) -> Self {
+                unsafe {
+                    use crate::mem::{transmute, uninitialized};
+
+                    union U {
+                        vec: [$vec_id; 2],
+                        twice: $vect_id,
+                    }
+
+                    let x_twice = U { vec: [self, uninitialized()] }.twice;
+                    let y_twice = U { vec: [y, uninitialized()] }.twice;
+                    let z_twice = U { vec: [z, uninitialized()] }.twice;
+                    let twice: $vect_id = transmute($fun(
+                        transmute(x_twice),
+                        transmute(y_twice),
+                        transmute(z_twice),
+                    ));
+
+                    *(U { twice }.vec.get_unchecked(0))
+                }
+            }
+        }
+    };
+}
+
+macro_rules! gen_tertiary_impl_table {
+    ($trait_id:ident, $trait_method:ident) => {
+        macro_rules! impl_tertiary {
+            ($vid:ident: $fun:ident) => {
+                impl_tertiary_!(vec | $trait_id, $trait_method, $vid, $fun);
+            };
+            ($vid:ident[$sid:ident; $sc:expr]: $fun:ident) => {
+                impl_tertiary_!(
+                    scalar | $trait_id,
+                    $trait_method,
+                    $vid,
+                    [$sid; $sc],
+                    $fun
+                );
+            };
+            ($vid:ident[s]: $fun:ident) => {
+                impl_tertiary_!(scalar | $trait_id, $trait_method, $vid, $fun);
+            };
+            ($vid:ident[h => $vid_h:ident]: $fun:ident) => {
+                impl_tertiary_!(
+                    halves | $trait_id,
+                    $trait_method,
+                    $vid,
+                    $vid_h,
+                    $fun
+                );
+            };
+            ($vid:ident[q => $vid_q:ident]: $fun:ident) => {
+                impl_tertiary_!(
+                    quarter | $trait_id,
+                    $trait_method,
+                    $vid,
+                    $vid_q,
+                    $fun
+                );
+            };
+            ($vid:ident[t => $vid_t:ident]: $fun:ident) => {
+                impl_tertiary_!(
+                    twice | $trait_id,
+                    $trait_method,
+                    $vid,
+                    $vid_t,
+                    $fun
+                );
+            };
+        }
+    };
+}
+
+macro_rules! impl_binary_ {
+    // implementation mapping 1:1
+    (vec | $trait_id:ident, $trait_method:ident, $vec_id:ident,
+     $fun:ident) => {
+        impl $trait_id for $vec_id {
+            #[inline]
+            fn $trait_method(self, y: Self) -> Self {
+                unsafe {
+                    use crate::mem::transmute;
+                    transmute($fun(transmute(self), transmute(y)))
+                }
+            }
+        }
+    };
+    (scalar | $trait_id:ident, $trait_method:ident,
+     $vec_id:ident, [$sid:ident; $scount:expr], $fun:ident) => {
+        impl $trait_id for $vec_id {
+            #[inline]
+            fn $trait_method(self, y: Self) -> Self {
+                unsafe {
+                    union U {
+                        vec: $vec_id,
+                        scalars: [$sid; $scount],
+                    }
+                    let mut x = U { vec: self }.scalars;
+                    let y = U { vec: y }.scalars;
+                    for (x, y) in x.iter_mut().zip(&y) {
+                        *x = $fun(*x, *y);
+                    }
+                    U { scalars: x }.vec
+                }
+            }
+        }
+    };
+    // implementation calling fun twice on each of the vector halves:
+    (halves | $trait_id:ident, $trait_method:ident, $vec_id:ident,
+     $vech_id:ident, $fun:ident) => {
+        impl $trait_id for $vec_id {
+            #[inline]
+            fn $trait_method(self, y: Self) -> Self {
+                unsafe {
+                    use crate::mem::transmute;
+                    union U {
+                        vec: $vec_id,
+                        halves: [$vech_id; 2],
+                    }
+
+                    let mut x_halves = U { vec: self }.halves;
+                    let y_halves = U { vec: y }.halves;
+
+                    *x_halves.get_unchecked_mut(0) = transmute($fun(
+                        transmute(*x_halves.get_unchecked(0)),
+                        transmute(*y_halves.get_unchecked(0)),
+                    ));
+                    *x_halves.get_unchecked_mut(1) = transmute($fun(
+                        transmute(*x_halves.get_unchecked(1)),
+                        transmute(*y_halves.get_unchecked(1)),
+                    ));
+
+                    U { halves: x_halves }.vec
+                }
+            }
+        }
+    };
+    // implementation calling fun four times on each of the vector quarters:
+    (quarter | $trait_id:ident, $trait_method:ident, $vec_id:ident,
+     $vecq_id:ident, $fun:ident) => {
+        impl $trait_id for $vec_id {
+            #[inline]
+            fn $trait_method(self, y: Self) -> Self {
+                unsafe {
+                    use crate::mem::transmute;
+                    union U {
+                        vec: $vec_id,
+                        quarters: [$vecq_id; 4],
+                    }
+
+                    let mut x_quarters = U { vec: self }.quarters;
+                    let y_quarters = U { vec: y }.quarters;
+
+                    *x_quarters.get_unchecked_mut(0) = transmute($fun(
+                        transmute(*x_quarters.get_unchecked(0)),
+                        transmute(*y_quarters.get_unchecked(0)),
+                    ));
+
+                    *x_quarters.get_unchecked_mut(1) = transmute($fun(
+                        transmute(*x_quarters.get_unchecked(1)),
+                        transmute(*y_quarters.get_unchecked(1)),
+                    ));
+
+                    *x_quarters.get_unchecked_mut(2) = transmute($fun(
+                        transmute(*x_quarters.get_unchecked(2)),
+                        transmute(*y_quarters.get_unchecked(2)),
+                    ));
+
+                    *x_quarters.get_unchecked_mut(3) = transmute($fun(
+                        transmute(*x_quarters.get_unchecked(3)),
+                        transmute(*y_quarters.get_unchecked(3)),
+                    ));
+
+                    U { quarters: x_quarters }.vec
+                }
+            }
+        }
+    };
+    // implementation calling fun once on a vector twice as large:
+    (twice | $trait_id:ident, $trait_method:ident, $vec_id:ident,
+     $vect_id:ident, $fun:ident) => {
+        impl $trait_id for $vec_id {
+            #[inline]
+            fn $trait_method(self, y: Self) -> Self {
+                unsafe {
+                    use crate::mem::{transmute, uninitialized};
+
+                    union U {
+                        vec: [$vec_id; 2],
+                        twice: $vect_id,
+                    }
+
+                    let x_twice = U { vec: [self, uninitialized()] }.twice;
+                    let y_twice = U { vec: [y, uninitialized()] }.twice;
+                    let twice: $vect_id = transmute($fun(
+                        transmute(x_twice),
+                        transmute(y_twice),
+                    ));
+
+                    *(U { twice }.vec.get_unchecked(0))
+                }
+            }
+        }
+    };
+}
+
+macro_rules! gen_binary_impl_table {
+    ($trait_id:ident, $trait_method:ident) => {
+        macro_rules! impl_binary {
+            ($vid:ident: $fun:ident) => {
+                impl_binary_!(vec | $trait_id, $trait_method, $vid, $fun);
+            };
+            ($vid:ident[$sid:ident; $sc:expr]: $fun:ident) => {
+                impl_binary_!(
+                    scalar | $trait_id,
+                    $trait_method,
+                    $vid,
+                    [$sid; $sc],
+                    $fun
+                );
+            };
+            ($vid:ident[s]: $fun:ident) => {
+                impl_binary_!(scalar | $trait_id, $trait_method, $vid, $fun);
+            };
+            ($vid:ident[h => $vid_h:ident]: $fun:ident) => {
+                impl_binary_!(
+                    halves | $trait_id,
+                    $trait_method,
+                    $vid,
+                    $vid_h,
+                    $fun
+                );
+            };
+            ($vid:ident[q => $vid_q:ident]: $fun:ident) => {
+                impl_binary_!(
+                    quarter | $trait_id,
+                    $trait_method,
+                    $vid,
+                    $vid_q,
+                    $fun
+                );
+            };
+            ($vid:ident[t => $vid_t:ident]: $fun:ident) => {
+                impl_binary_!(
+                    twice | $trait_id,
+                    $trait_method,
+                    $vid,
+                    $vid_t,
+                    $fun
+                );
+            };
+        }
+    };
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/math/float/mul_add.rs.html b/src/packed_simd/codegen/math/float/mul_add.rs.html new file mode 100644 index 000000000..5bca4fd69 --- /dev/null +++ b/src/packed_simd/codegen/math/float/mul_add.rs.html @@ -0,0 +1,221 @@ +mul_add.rs.html -- source
+//! Vertical floating-point `mul_add`
+#![allow(unused)]
+use crate::*;
+
+// FIXME: 64-bit 1 element mul_add
+
+crate trait MulAdd {
+    fn mul_add(self, y: Self, z: Self) -> Self;
+}
+
+#[cfg(not(target_arch = "s390x"))]
+#[allow(improper_ctypes)]
+extern "C" {
+    #[link_name = "llvm.fma.v2f32"]
+    fn fma_v2f32(x: f32x2, y: f32x2, z: f32x2) -> f32x2;
+    #[link_name = "llvm.fma.v4f32"]
+    fn fma_v4f32(x: f32x4, y: f32x4, z: f32x4) -> f32x4;
+    #[link_name = "llvm.fma.v8f32"]
+    fn fma_v8f32(x: f32x8, y: f32x8, z: f32x8) -> f32x8;
+    #[link_name = "llvm.fma.v16f32"]
+    fn fma_v16f32(x: f32x16, y: f32x16, z: f32x16) -> f32x16;
+    /* FIXME 64-bit single elem vectors
+    #[link_name = "llvm.fma.v1f64"]
+    fn fma_v1f64(x: f64x1, y: f64x1, z: f64x1) -> f64x1;
+    */
+    #[link_name = "llvm.fma.v2f64"]
+    fn fma_v2f64(x: f64x2, y: f64x2, z: f64x2) -> f64x2;
+    #[link_name = "llvm.fma.v4f64"]
+    fn fma_v4f64(x: f64x4, y: f64x4, z: f64x4) -> f64x4;
+    #[link_name = "llvm.fma.v8f64"]
+    fn fma_v8f64(x: f64x8, y: f64x8, z: f64x8) -> f64x8;
+}
+
+gen_tertiary_impl_table!(MulAdd, mul_add);
+
+cfg_if! {
+    if #[cfg(target_arch = "s390x")] {
+        // FIXME: https://github.com/rust-lang-nursery/packed_simd/issues/14
+        macro_rules! impl_broken {
+            ($id:ident) => {
+                impl MulAdd for $id {
+                    #[inline]
+                    fn mul_add(self, y: Self, z: Self) -> Self {
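+                        // NB: unfused multiply-then-add; this rounds twice,
+                        // unlike a true fused multiply-add.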
+                        self * y + z
+                    }
+                }
+            };
+        }
+
+        impl_broken!(f32x2);
+        impl_broken!(f32x4);
+        impl_broken!(f32x8);
+        impl_broken!(f32x16);
+
+        impl_broken!(f64x2);
+        impl_broken!(f64x4);
+        impl_broken!(f64x8);
+    } else if #[cfg(all(target_arch = "x86_64", feature = "sleef-sys"))] {
+        use sleef_sys::*;
+        cfg_if! {
+            if #[cfg(target_feature = "avx2")] {
+                impl_tertiary!(f32x2[t => f32x4]: Sleef_fmaf4_avx2128);
+                impl_tertiary!(f32x16[h => f32x8]: Sleef_fmaf8_avx2);
+                impl_tertiary!(f64x8[h => f64x4]: Sleef_fmad4_avx2);
+
+                impl_tertiary!(f32x4: Sleef_fmaf4_avx2128);
+                impl_tertiary!(f32x8: Sleef_fmaf8_avx2);
+                impl_tertiary!(f64x2: Sleef_fmad2_avx2128);
+                impl_tertiary!(f64x4: Sleef_fmad4_avx2);
+            } else if #[cfg(target_feature = "avx")] {
+                impl_tertiary!(f32x2[t => f32x4]: Sleef_fmaf4_sse4);
+                impl_tertiary!(f32x16[h => f32x8]: Sleef_fmaf8_avx);
+                impl_tertiary!(f64x8[h => f64x4]: Sleef_fmad4_avx);
+
+                impl_tertiary!(f32x4: Sleef_fmaf4_sse4);
+                impl_tertiary!(f32x8: Sleef_fmaf8_avx);
+                impl_tertiary!(f64x2: Sleef_fmad2_sse4);
+                impl_tertiary!(f64x4: Sleef_fmad4_avx);
+            } else if #[cfg(target_feature = "sse4.2")] {
+                impl_tertiary!(f32x2[t => f32x4]: Sleef_fmaf4_sse4);
+                impl_tertiary!(f32x16[q => f32x4]: Sleef_fmaf4_sse4);
+                impl_tertiary!(f64x8[q => f64x2]: Sleef_fmad2_sse4);
+
+                impl_tertiary!(f32x4: Sleef_fmaf4_sse4);
+                impl_tertiary!(f32x8[h => f32x4]: Sleef_fmaf4_sse4);
+                impl_tertiary!(f64x2: Sleef_fmad2_sse4);
+                impl_tertiary!(f64x4[h => f64x2]: Sleef_fmad2_sse4);
+            } else {
+                impl_tertiary!(f32x2: fma_v2f32);
+                impl_tertiary!(f32x16: fma_v16f32);
+                impl_tertiary!(f64x8: fma_v8f64);
+
+                impl_tertiary!(f32x4: fma_v4f32);
+                impl_tertiary!(f32x8: fma_v8f32);
+                impl_tertiary!(f64x2: fma_v2f64);
+                impl_tertiary!(f64x4: fma_v4f64);
+            }
+        }
+    } else {
+        impl_tertiary!(f32x2: fma_v2f32);
+        impl_tertiary!(f32x4: fma_v4f32);
+        impl_tertiary!(f32x8: fma_v8f32);
+        impl_tertiary!(f32x16: fma_v16f32);
+        // impl_tertiary!(f64x1: fma_v1f64); // FIXME 64-bit single elem vectors
+        impl_tertiary!(f64x2: fma_v2f64);
+        impl_tertiary!(f64x4: fma_v4f64);
+        impl_tertiary!(f64x8: fma_v8f64);
+    }
+}
+
+
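For context, a minimal usage sketch of the public API this module backs (assuming the `packed_simd` crate as a dependency). Each lane computes `x * y + z`, with a single rounding where the target provides FMA:

use packed_simd::f32x4;

fn main() {
    let x = f32x4::new(1.0, 2.0, 3.0, 4.0);
    let y = f32x4::splat(10.0);
    let z = f32x4::splat(0.5);
    // Vertical (lane-wise) fused multiply-add.
    assert_eq!(x.mul_add(y, z), f32x4::new(10.5, 20.5, 30.5, 40.5));
}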
\ No newline at end of file diff --git a/src/packed_simd/codegen/math/float/mul_adde.rs.html b/src/packed_simd/codegen/math/float/mul_adde.rs.html new file mode 100644 index 000000000..fab7f8241 --- /dev/null +++ b/src/packed_simd/codegen/math/float/mul_adde.rs.html @@ -0,0 +1,135 @@ +mul_adde.rs.html -- source
+//! Approximation for floating-point `mul_add`
+use crate::*;
+
+// FIXME: 64-bit 1 element mul_adde
+
+crate trait MulAddE {
+    fn mul_adde(self, y: Self, z: Self) -> Self;
+}
+
+#[cfg(not(target_arch = "s390x"))]
+#[allow(improper_ctypes)]
+extern "C" {
+    #[link_name = "llvm.fmuladd.v2f32"]
+    fn fmuladd_v2f32(x: f32x2, y: f32x2, z: f32x2) -> f32x2;
+    #[link_name = "llvm.fmuladd.v4f32"]
+    fn fmuladd_v4f32(x: f32x4, y: f32x4, z: f32x4) -> f32x4;
+    #[link_name = "llvm.fmuladd.v8f32"]
+    fn fmuladd_v8f32(x: f32x8, y: f32x8, z: f32x8) -> f32x8;
+    #[link_name = "llvm.fmuladd.v16f32"]
+    fn fmuladd_v16f32(x: f32x16, y: f32x16, z: f32x16) -> f32x16;
+    /* FIXME 64-bit single elem vectors
+    #[link_name = "llvm.fmuladd.v1f64"]
+    fn fmuladd_v1f64(x: f64x1, y: f64x1, z: f64x1) -> f64x1;
+    */
+    #[link_name = "llvm.fmuladd.v2f64"]
+    fn fmuladd_v2f64(x: f64x2, y: f64x2, z: f64x2) -> f64x2;
+    #[link_name = "llvm.fmuladd.v4f64"]
+    fn fmuladd_v4f64(x: f64x4, y: f64x4, z: f64x4) -> f64x4;
+    #[link_name = "llvm.fmuladd.v8f64"]
+    fn fmuladd_v8f64(x: f64x8, y: f64x8, z: f64x8) -> f64x8;
+}
+
+macro_rules! impl_mul_adde {
+    ($id:ident : $fn:ident) => {
+        impl MulAddE for $id {
+            #[inline]
+            fn mul_adde(self, y: Self, z: Self) -> Self {
+                #[cfg(not(target_arch = "s390x"))]
+                {
+                    use crate::mem::transmute;
+                    unsafe {
+                        transmute($fn(
+                            transmute(self),
+                            transmute(y),
+                            transmute(z),
+                        ))
+                    }
+                }
+                #[cfg(target_arch = "s390x")]
+                {
+                    // FIXME: https://github.com/rust-lang-nursery/packed_simd/issues/14
+                    self * y + z
+                }
+            }
+        }
+    };
+}
+
+impl_mul_adde!(f32x2: fmuladd_v2f32);
+impl_mul_adde!(f32x4: fmuladd_v4f32);
+impl_mul_adde!(f32x8: fmuladd_v8f32);
+impl_mul_adde!(f32x16: fmuladd_v16f32);
+// impl_mul_adde!(f64x1: fmuladd_v1f64); // FIXME 64-bit single elem vectors
+impl_mul_adde!(f64x2: fmuladd_v2f64);
+impl_mul_adde!(f64x4: fmuladd_v4f64);
+impl_mul_adde!(f64x8: fmuladd_v8f64);
+
+
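Unlike `llvm.fma.*`, the `llvm.fmuladd.*` intrinsics let the backend choose between a fused multiply-add and a separate multiply and add. A hedged scalar model of that contract (the helper below is illustrative, not part of the crate):

// Either the fused result or the separately rounded result is acceptable.
fn is_valid_mul_adde(x: f32, y: f32, z: f32, r: f32) -> bool {
    r == x.mul_add(y, z) || r == x * y + z
}

fn main() {
    let (x, y, z) = (0.1_f32, 0.2_f32, 0.3_f32);
    assert!(is_valid_mul_adde(x, y, z, x * y + z));
}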
\ No newline at end of file diff --git a/src/packed_simd/codegen/math/float/powf.rs.html b/src/packed_simd/codegen/math/float/powf.rs.html new file mode 100644 index 000000000..813e8a6c2 --- /dev/null +++ b/src/packed_simd/codegen/math/float/powf.rs.html @@ -0,0 +1,227 @@ +powf.rs.html -- source
+//! Vertical floating-point `powf`
+#![allow(unused)]
+
+// FIXME 64-bit single elem vectors missing
+
+use crate::*;
+
+crate trait Powf {
+    fn powf(self, x: Self) -> Self;
+}
+
+#[allow(improper_ctypes)]
+extern "C" {
+    #[link_name = "llvm.pow.v2f32"]
+    fn powf_v2f32(x: f32x2, y: f32x2) -> f32x2;
+    #[link_name = "llvm.pow.v4f32"]
+    fn powf_v4f32(x: f32x4, y: f32x4) -> f32x4;
+    #[link_name = "llvm.pow.v8f32"]
+    fn powf_v8f32(x: f32x8, y: f32x8) -> f32x8;
+    #[link_name = "llvm.pow.v16f32"]
+    fn powf_v16f32(x: f32x16, y: f32x16) -> f32x16;
+    /* FIXME 64-bit single elem vectors
+    #[link_name = "llvm.pow.v1f64"]
+    fn powf_v1f64(x: f64x1, y: f64x1) -> f64x1;
+     */
+    #[link_name = "llvm.pow.v2f64"]
+    fn powf_v2f64(x: f64x2, y: f64x2) -> f64x2;
+    #[link_name = "llvm.pow.v4f64"]
+    fn powf_v4f64(x: f64x4, y: f64x4) -> f64x4;
+    #[link_name = "llvm.pow.v8f64"]
+    fn powf_v8f64(x: f64x8, y: f64x8) -> f64x8;
+
+    #[link_name = "llvm.pow.f32"]
+    fn powf_f32(x: f32, y: f32) -> f32;
+    #[link_name = "llvm.pow.f64"]
+    fn powf_f64(x: f64, y: f64) -> f64;
+}
+
+gen_binary_impl_table!(Powf, powf);
+
+cfg_if! {
+    if #[cfg(target_arch = "s390x")] {
+        // FIXME: https://github.com/rust-lang-nursery/packed_simd/issues/14
+        impl_binary!(f32x2[f32; 2]: powf_f32);
+        impl_binary!(f32x4[f32; 4]: powf_f32);
+        impl_binary!(f32x8[f32; 8]: powf_f32);
+        impl_binary!(f32x16[f32; 16]: powf_f32);
+
+        impl_binary!(f64x2[f64; 2]: powf_f64);
+        impl_binary!(f64x4[f64; 4]: powf_f64);
+        impl_binary!(f64x8[f64; 8]: powf_f64);
+    } else if #[cfg(all(target_arch = "x86_64", feature = "sleef-sys"))] {
+        use sleef_sys::*;
+        cfg_if! {
+            if #[cfg(target_feature = "avx2")] {
+                impl_binary!(f32x2[t => f32x4]: Sleef_powf4_u10avx2128);
+                impl_binary!(f32x16[h => f32x8]: Sleef_powf8_u10avx2);
+                impl_binary!(f64x8[h => f64x4]: Sleef_powd4_u10avx2);
+
+                impl_binary!(f32x4: Sleef_powf4_u10avx2128);
+                impl_binary!(f32x8: Sleef_powf8_u10avx2);
+                impl_binary!(f64x2: Sleef_powd2_u10avx2128);
+                impl_binary!(f64x4: Sleef_powd4_u10avx2);
+            } else if #[cfg(target_feature = "avx")] {
+                impl_binary!(f32x2[t => f32x4]: Sleef_powf4_u10sse4);
+                impl_binary!(f32x16[h => f32x8]: Sleef_powf8_u10avx);
+                impl_binary!(f64x8[h => f64x4]: Sleef_powd4_u10avx);
+
+                impl_binary!(f32x4: Sleef_powf4_u10sse4);
+                impl_binary!(f32x8: Sleef_powf8_u10avx);
+                impl_binary!(f64x2: Sleef_powd2_u10sse4);
+                impl_binary!(f64x4: Sleef_powd4_u10avx);
+            } else if #[cfg(target_feature = "sse4.2")] {
+                impl_binary!(f32x2[t => f32x4]: Sleef_powf4_u10sse4);
+                impl_binary!(f32x16[q => f32x4]: Sleef_powf4_u10sse4);
+                impl_binary!(f64x8[q => f64x2]: Sleef_powd2_u10sse4);
+
+                impl_binary!(f32x4: Sleef_powf4_u10sse4);
+                impl_binary!(f32x8[h => f32x4]: Sleef_powf4_u10sse4);
+                impl_binary!(f64x2: Sleef_powd2_u10sse4);
+                impl_binary!(f64x4[h => f64x2]: Sleef_powd2_u10sse4);
+            } else if #[cfg(target_feature = "sse2")] {
+                impl_binary!(f32x2[t => f32x4]: Sleef_powf4_u10sse2);
+                impl_binary!(f32x16[q => f32x4]: Sleef_powf4_u10sse2);
+                impl_binary!(f64x8[q => f64x2]: Sleef_powd2_u10sse2);
+
+                impl_binary!(f32x4: Sleef_powf4_u10sse2);
+                impl_binary!(f32x8[h => f32x4]: Sleef_powf4_u10sse2);
+                impl_binary!(f64x2: Sleef_powd2_u10sse2);
+                impl_binary!(f64x4[h => f64x2]: Sleef_powd2_u10sse2);
+            } else {
+                impl_binary!(f32x2[f32; 2]: powf_f32);
+                impl_binary!(f32x4: powf_v4f32);
+                impl_binary!(f32x8: powf_v8f32);
+                impl_binary!(f32x16: powf_v16f32);
+
+                impl_binary!(f64x2: powf_v2f64);
+                impl_binary!(f64x4: powf_v4f64);
+                impl_binary!(f64x8: powf_v8f64);
+            }
+        }
+    } else {
+        impl_binary!(f32x2[f32; 2]: powf_f32);
+        impl_binary!(f32x4: powf_v4f32);
+        impl_binary!(f32x8: powf_v8f32);
+        impl_binary!(f32x16: powf_v16f32);
+
+        impl_binary!(f64x2: powf_v2f64);
+        impl_binary!(f64x4: powf_v4f64);
+        impl_binary!(f64x8: powf_v8f64);
+    }
+}
+
+
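Usage sketch of the corresponding public API (assuming `packed_simd`): `powf` raises each lane of the base vector to the matching lane of the exponent vector. The backends guarantee faithful, not exact, rounding, so the check allows a small relative error:

use packed_simd::f32x4;

fn main() {
    let base = f32x4::new(1.0, 2.0, 3.0, 4.0);
    let r = base.powf(f32x4::splat(2.0));
    let expected = f32x4::new(1.0, 4.0, 9.0, 16.0);
    // Lane-wise relative error stays well below 1e-6.
    assert!(((r - expected).abs() / expected).le(f32x4::splat(1e-6)).all());
}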
\ No newline at end of file diff --git a/src/packed_simd/codegen/math/float/sin.rs.html b/src/packed_simd/codegen/math/float/sin.rs.html new file mode 100644 index 000000000..f876888de --- /dev/null +++ b/src/packed_simd/codegen/math/float/sin.rs.html @@ -0,0 +1,209 @@ +sin.rs.html -- source
+//! Vertical floating-point `sin`
+#![allow(unused)]
+
+// FIXME 64-bit 1 elem vectors sin
+
+use crate::*;
+
+crate trait Sin {
+    fn sin(self) -> Self;
+}
+
+#[allow(improper_ctypes)]
+extern "C" {
+    #[link_name = "llvm.sin.v2f32"]
+    fn sin_v2f32(x: f32x2) -> f32x2;
+    #[link_name = "llvm.sin.v4f32"]
+    fn sin_v4f32(x: f32x4) -> f32x4;
+    #[link_name = "llvm.sin.v8f32"]
+    fn sin_v8f32(x: f32x8) -> f32x8;
+    #[link_name = "llvm.sin.v16f32"]
+    fn sin_v16f32(x: f32x16) -> f32x16;
+    /* FIXME 64-bit single elem vectors
+    #[link_name = "llvm.sin.v1f64"]
+    fn sin_v1f64(x: f64x1) -> f64x1;
+     */
+    #[link_name = "llvm.sin.v2f64"]
+    fn sin_v2f64(x: f64x2) -> f64x2;
+    #[link_name = "llvm.sin.v4f64"]
+    fn sin_v4f64(x: f64x4) -> f64x4;
+    #[link_name = "llvm.sin.v8f64"]
+    fn sin_v8f64(x: f64x8) -> f64x8;
+
+    #[link_name = "llvm.sin.f32"]
+    fn sin_f32(x: f32) -> f32;
+    #[link_name = "llvm.sin.f64"]
+    fn sin_f64(x: f64) -> f64;
+}
+
+gen_unary_impl_table!(Sin, sin);
+
+cfg_if! {
+    if #[cfg(target_arch = "s390x")] {
+        // FIXME: https://github.com/rust-lang-nursery/packed_simd/issues/14
+        impl_unary!(f32x2[f32; 2]: sin_f32);
+        impl_unary!(f32x4[f32; 4]: sin_f32);
+        impl_unary!(f32x8[f32; 8]: sin_f32);
+        impl_unary!(f32x16[f32; 16]: sin_f32);
+
+        impl_unary!(f64x2[f64; 2]: sin_f64);
+        impl_unary!(f64x4[f64; 4]: sin_f64);
+        impl_unary!(f64x8[f64; 8]: sin_f64);
+    } else if #[cfg(all(target_arch = "x86_64", feature = "sleef-sys"))] {
+        use sleef_sys::*;
+        cfg_if! {
+            if #[cfg(target_feature = "avx2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_sinf4_u10avx2128);
+                impl_unary!(f32x16[h => f32x8]: Sleef_sinf8_u10avx2);
+                impl_unary!(f64x8[h => f64x4]: Sleef_sind4_u10avx2);
+
+                impl_unary!(f32x4: Sleef_sinf4_u10avx2128);
+                impl_unary!(f32x8: Sleef_sinf8_u10avx2);
+                impl_unary!(f64x2: Sleef_sind2_u10avx2128);
+                impl_unary!(f64x4: Sleef_sind4_u10avx2);
+            } else if #[cfg(target_feature = "avx")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_sinf4_u10sse4);
+                impl_unary!(f32x16[h => f32x8]: Sleef_sinf8_u10avx);
+                impl_unary!(f64x8[h => f64x4]: Sleef_sind4_u10avx);
+
+                impl_unary!(f32x4: Sleef_sinf4_u10sse4);
+                impl_unary!(f32x8: Sleef_sinf8_u10avx);
+                impl_unary!(f64x2: Sleef_sind2_u10sse4);
+                impl_unary!(f64x4: Sleef_sind4_u10avx);
+            } else if #[cfg(target_feature = "sse4.2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_sinf4_u10sse4);
+                impl_unary!(f32x16[q => f32x4]: Sleef_sinf4_u10sse4);
+                impl_unary!(f64x8[q => f64x2]: Sleef_sind2_u10sse4);
+
+                impl_unary!(f32x4: Sleef_sinf4_u10sse4);
+                impl_unary!(f32x8[h => f32x4]: Sleef_sinf4_u10sse4);
+                impl_unary!(f64x2: Sleef_sind2_u10sse4);
+                impl_unary!(f64x4[h => f64x2]: Sleef_sind2_u10sse4);
+            } else {
+                impl_unary!(f32x2[f32; 2]: sin_f32);
+                impl_unary!(f32x16: sin_v16f32);
+                impl_unary!(f64x8: sin_v8f64);
+
+                impl_unary!(f32x4: sin_v4f32);
+                impl_unary!(f32x8: sin_v8f32);
+                impl_unary!(f64x2: sin_v2f64);
+                impl_unary!(f64x4: sin_v4f64);
+            }
+        }
+    } else {
+        impl_unary!(f32x2[f32; 2]: sin_f32);
+        impl_unary!(f32x4: sin_v4f32);
+        impl_unary!(f32x8: sin_v8f32);
+        impl_unary!(f32x16: sin_v16f32);
+
+        impl_unary!(f64x2: sin_v2f64);
+        impl_unary!(f64x4: sin_v4f64);
+        impl_unary!(f64x8: sin_v8f64);
+    }
+}
+
+
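The `$id[$elem_ty; $n]` arms of these impl tables lower a vector call to a scalar loop. A hedged sketch of roughly what `impl_unary!(f32x2[f32; 2]: sin_f32)` expands to (shape simplified for illustration; the real expansion lives in the codegen macro tables):

impl Sin for f32x2 {
    #[inline]
    fn sin(self) -> Self {
        // Reinterpret as an array, apply the scalar intrinsic per lane,
        // and reassemble the vector.
        let mut a: [f32; 2] = unsafe { crate::mem::transmute(self) };
        for x in &mut a {
            *x = unsafe { sin_f32(*x) };
        }
        unsafe { crate::mem::transmute(a) }
    }
}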
\ No newline at end of file diff --git a/src/packed_simd/codegen/math/float/sin_cos_pi.rs.html b/src/packed_simd/codegen/math/float/sin_cos_pi.rs.html new file mode 100644 index 000000000..11ff67cb2 --- /dev/null +++ b/src/packed_simd/codegen/math/float/sin_cos_pi.rs.html @@ -0,0 +1,393 @@ +sin_cos_pi.rs.html -- source
+//! Vertical floating-point `sin_cos_pi`
+#![allow(unused)]
+
+// FIXME 64-bit 1 elem vectors sin_cos
+
+use crate::*;
+
+crate trait SinCosPi: Sized {
+    type Output;
+    fn sin_cos_pi(self) -> Self::Output;
+}
+
+macro_rules! impl_def {
+    ($vid:ident, $PI:path) => {
+        impl SinCosPi for $vid {
+            type Output = (Self, Self);
+            #[inline]
+            fn sin_cos_pi(self) -> Self::Output {
+                let v = self * Self::splat($PI);
+                (v.sin(), v.cos())
+            }
+        }
+    };
+}
+
+macro_rules! impl_def32 {
+    ($vid:ident) => {
+        impl_def!($vid, crate::f32::consts::PI);
+    };
+}
+macro_rules! impl_def64 {
+    ($vid:ident) => {
+        impl_def!($vid, crate::f64::consts::PI);
+    };
+}
+
+macro_rules! impl_unary_t {
+    ($vid:ident: $fun:ident) => {
+        impl SinCosPi for $vid {
+            type Output = (Self, Self);
+            fn sin_cos_pi(self) -> Self::Output {
+                unsafe {
+                    use crate::mem::transmute;
+                    transmute($fun(transmute(self)))
+                }
+            }
+        }
+    };
+    ($vid:ident[t => $vid_t:ident]: $fun:ident) => {
+        impl SinCosPi for $vid {
+            type Output = (Self, Self);
+            fn sin_cos_pi(self) -> Self::Output {
+                unsafe {
+                    use crate::mem::{transmute, uninitialized};
+
+                    union U {
+                        vec: [$vid; 2],
+                        twice: $vid_t,
+                    }
+
+                    let twice = U { vec: [self, uninitialized()] }.twice;
+                    let twice = transmute($fun(transmute(twice)));
+
+                    union R {
+                        twice: ($vid_t, $vid_t),
+                        vecs: ([$vid; 2], [$vid; 2]),
+                    }
+                    let r = R { twice }.vecs;
+                    (*r.0.get_unchecked(0), *r.0.get_unchecked(1))
+                }
+            }
+        }
+    };
+    ($vid:ident[h => $vid_h:ident]: $fun:ident) => {
+        impl SinCosPi for $vid {
+            type Output = (Self, Self);
+            fn sin_cos_pi(self) -> Self::Output {
+                unsafe {
+                    use crate::mem::transmute;
+
+                    union U {
+                        vec: $vid,
+                        halves: [$vid_h; 2],
+                    }
+
+                    let halves = U { vec: self }.halves;
+
+                    let res_0: ($vid_h, $vid_h) =
+                        transmute($fun(transmute(*halves.get_unchecked(0))));
+                    let res_1: ($vid_h, $vid_h) =
+                        transmute($fun(transmute(*halves.get_unchecked(1))));
+
+                    union R {
+                        result: ($vid, $vid),
+                        halves: ([$vid_h; 2], [$vid_h; 2]),
+                    }
+                    R { halves: ([res_0.0, res_1.0], [res_0.1, res_1.1]) }
+                        .result
+                }
+            }
+        }
+    };
+    ($vid:ident[q => $vid_q:ident]: $fun:ident) => {
+        impl SinCosPi for $vid {
+            type Output = (Self, Self);
+            fn sin_cos_pi(self) -> Self::Output {
+                unsafe {
+                    use crate::mem::transmute;
+
+                    union U {
+                        vec: $vid,
+                        quarters: [$vid_q; 4],
+                    }
+
+                    let quarters = U { vec: self }.quarters;
+
+                    let res_0: ($vid_q, $vid_q) =
+                        transmute($fun(transmute(*quarters.get_unchecked(0))));
+                    let res_1: ($vid_q, $vid_q) =
+                        transmute($fun(transmute(*quarters.get_unchecked(1))));
+                    let res_2: ($vid_q, $vid_q) =
+                        transmute($fun(transmute(*quarters.get_unchecked(2))));
+                    let res_3: ($vid_q, $vid_q) =
+                        transmute($fun(transmute(*quarters.get_unchecked(3))));
+
+                    union R {
+                        result: ($vid, $vid),
+                        quarters: ([$vid_q; 4], [$vid_q; 4]),
+                    }
+                    R {
+                        quarters: (
+                            [res_0.0, res_1.0, res_2.0, res_3.0],
+                            [res_0.1, res_1.1, res_2.1, res_3.1],
+                        ),
+                    }
+                    .result
+                }
+            }
+        }
+    };
+}
+
+cfg_if! {
+    if #[cfg(all(target_arch = "x86_64", feature = "sleef-sys"))] {
+        use sleef_sys::*;
+        cfg_if! {
+            if #[cfg(target_feature = "avx2")] {
+                impl_unary_t!(f32x2[t => f32x4]: Sleef_sincospif4_u05avx2128);
+                impl_unary_t!(f32x16[h => f32x8]: Sleef_sincospif8_u05avx2);
+                impl_unary_t!(f64x8[h => f64x4]: Sleef_sincospid4_u05avx2);
+
+                impl_unary_t!(f32x4: Sleef_sincospif4_u05avx2128);
+                impl_unary_t!(f32x8: Sleef_sincospif8_u05avx2);
+                impl_unary_t!(f64x2: Sleef_sincospid2_u05avx2128);
+                impl_unary_t!(f64x4: Sleef_sincospid4_u05avx2);
+            } else if #[cfg(target_feature = "avx")] {
+                impl_unary_t!(f32x2[t => f32x4]: Sleef_sincospif4_u05sse4);
+                impl_unary_t!(f32x16[h => f32x8]: Sleef_sincospif8_u05avx);
+                impl_unary_t!(f64x8[h => f64x4]: Sleef_sincospid4_u05avx);
+
+                impl_unary_t!(f32x4: Sleef_sincospif4_u05sse4);
+                impl_unary_t!(f32x8: Sleef_sincospif8_u05avx);
+                impl_unary_t!(f64x2: Sleef_sincospid2_u05sse4);
+                impl_unary_t!(f64x4: Sleef_sincospid4_u05avx);
+            } else if #[cfg(target_feature = "sse4.2")] {
+                impl_unary_t!(f32x2[t => f32x4]: Sleef_sincospif4_u05sse4);
+                impl_unary_t!(f32x16[q => f32x4]: Sleef_sincospif4_u05sse4);
+                impl_unary_t!(f64x8[q => f64x2]: Sleef_sincospid2_u05sse4);
+
+                impl_unary_t!(f32x4: Sleef_sincospif4_u05sse4);
+                impl_unary_t!(f32x8[h => f32x4]: Sleef_sincospif4_u05sse4);
+                impl_unary_t!(f64x2: Sleef_sincospid2_u05sse4);
+                impl_unary_t!(f64x4[h => f64x2]: Sleef_sincospid2_u05sse4);
+            } else {
+                impl_def32!(f32x2);
+                impl_def32!(f32x4);
+                impl_def32!(f32x8);
+                impl_def32!(f32x16);
+
+                impl_def64!(f64x2);
+                impl_def64!(f64x4);
+                impl_def64!(f64x8);
+            }
+        }
+    } else {
+        impl_def32!(f32x2);
+        impl_def32!(f32x4);
+        impl_def32!(f32x8);
+        impl_def32!(f32x16);
+
+        impl_def64!(f64x2);
+        impl_def64!(f64x4);
+        impl_def64!(f64x8);
+    }
+}
+
+
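Usage sketch (assuming `packed_simd`): `sin_cos_pi(x)` returns `(sin(π·x), cos(π·x))` per lane, so half-integer arguments land on the well-known values up to rounding:

use packed_simd::f32x4;

fn main() {
    let x = f32x4::new(0.0, 0.5, 1.0, 1.5);
    let (s, c) = x.sin_cos_pi();
    // sin(π·x) = (0, 1, 0, -1) and cos(π·x) = (1, 0, -1, 0), up to rounding.
    assert!((s - f32x4::new(0.0, 1.0, 0.0, -1.0)).abs().le(f32x4::splat(1e-6)).all());
    assert!((c - f32x4::new(1.0, 0.0, -1.0, 0.0)).abs().le(f32x4::splat(1e-6)).all());
}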
\ No newline at end of file diff --git a/src/packed_simd/codegen/math/float/sin_pi.rs.html b/src/packed_simd/codegen/math/float/sin_pi.rs.html new file mode 100644 index 000000000..1a949d1ce --- /dev/null +++ b/src/packed_simd/codegen/math/float/sin_pi.rs.html @@ -0,0 +1,177 @@ +sin_pi.rs.html -- source
+//! Vertical floating-point `sin_pi`
+#![allow(unused)]
+
+// FIXME 64-bit 1 elem vectors sin_pi
+
+use crate::*;
+
+crate trait SinPi {
+    fn sin_pi(self) -> Self;
+}
+
+gen_unary_impl_table!(SinPi, sin_pi);
+
+macro_rules! impl_def {
+    ($vid:ident, $PI:path) => {
+        impl SinPi for $vid {
+            #[inline]
+            fn sin_pi(self) -> Self {
+                (self * Self::splat($PI)).sin()
+            }
+        }
+    };
+}
+macro_rules! impl_def32 {
+    ($vid:ident) => {
+        impl_def!($vid, crate::f32::consts::PI);
+    };
+}
+macro_rules! impl_def64 {
+    ($vid:ident) => {
+        impl_def!($vid, crate::f64::consts::PI);
+    };
+}
+
+cfg_if! {
+    if #[cfg(all(target_arch = "x86_64", feature = "sleef-sys"))] {
+        use sleef_sys::*;
+        cfg_if! {
+            if #[cfg(target_feature = "avx2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_sinpif4_u05avx2128);
+                impl_unary!(f32x16[h => f32x8]: Sleef_sinpif8_u05avx2);
+                impl_unary!(f64x8[h => f64x4]: Sleef_sinpid4_u05avx2);
+
+                impl_unary!(f32x4: Sleef_sinpif4_u05avx2128);
+                impl_unary!(f32x8: Sleef_sinpif8_u05avx2);
+                impl_unary!(f64x2: Sleef_sinpid2_u05avx2128);
+                impl_unary!(f64x4: Sleef_sinpid4_u05avx2);
+            } else if #[cfg(target_feature = "avx")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_sinpif4_u05sse4);
+                impl_unary!(f32x16[h => f32x8]: Sleef_sinpif8_u05avx);
+                impl_unary!(f64x8[h => f64x4]: Sleef_sinpid4_u05avx);
+
+                impl_unary!(f32x4: Sleef_sinpif4_u05sse4);
+                impl_unary!(f32x8: Sleef_sinpif8_u05avx);
+                impl_unary!(f64x2: Sleef_sinpid2_u05sse4);
+                impl_unary!(f64x4: Sleef_sinpid4_u05avx);
+            } else if #[cfg(target_feature = "sse4.2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_sinpif4_u05sse4);
+                impl_unary!(f32x16[q => f32x4]: Sleef_sinpif4_u05sse4);
+                impl_unary!(f64x8[q => f64x2]: Sleef_sinpid2_u05sse4);
+
+                impl_unary!(f32x4: Sleef_sinpif4_u05sse4);
+                impl_unary!(f32x8[h => f32x4]: Sleef_sinpif4_u05sse4);
+                impl_unary!(f64x2: Sleef_sinpid2_u05sse4);
+                impl_unary!(f64x4[h => f64x2]: Sleef_sinpid2_u05sse4);
+            } else {
+                impl_def32!(f32x2);
+                impl_def32!(f32x4);
+                impl_def32!(f32x8);
+                impl_def32!(f32x16);
+
+                impl_def64!(f64x2);
+                impl_def64!(f64x4);
+                impl_def64!(f64x8);
+            }
+        }
+    } else {
+        impl_def32!(f32x2);
+        impl_def32!(f32x4);
+        impl_def32!(f32x8);
+        impl_def32!(f32x16);
+
+        impl_def64!(f64x2);
+        impl_def64!(f64x4);
+        impl_def64!(f64x8);
+    }
+}
+
+
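A note on the fallback path: `impl_def!` computes `(self * PI).sin()`, so without a SLEEF backend the result inherits the rounding of `x * PI`. A scalar illustration of that caveat (the helper is illustrative, not part of the crate):

fn sin_pi_fallback(x: f32) -> f32 {
    (x * std::f32::consts::PI).sin()
}

fn main() {
    // sin_pi(1.0) should be 0, but the fallback evaluates sin of a rounded π.
    let r = sin_pi_fallback(1.0);
    assert!(r != 0.0 && r.abs() < 1e-6);
}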
\ No newline at end of file diff --git a/src/packed_simd/codegen/math/float/sqrt.rs.html b/src/packed_simd/codegen/math/float/sqrt.rs.html new file mode 100644 index 000000000..e368f2be0 --- /dev/null +++ b/src/packed_simd/codegen/math/float/sqrt.rs.html @@ -0,0 +1,209 @@ +sqrt.rs.html -- source
+//! Vertical floating-point `sqrt`
+#![allow(unused)]
+
+// FIXME 64-bit 1 elem vectors sqrt
+
+use crate::*;
+
+crate trait Sqrt {
+    fn sqrt(self) -> Self;
+}
+
+#[allow(improper_ctypes)]
+extern "C" {
+    #[link_name = "llvm.sqrt.v2f32"]
+    fn sqrt_v2f32(x: f32x2) -> f32x2;
+    #[link_name = "llvm.sqrt.v4f32"]
+    fn sqrt_v4f32(x: f32x4) -> f32x4;
+    #[link_name = "llvm.sqrt.v8f32"]
+    fn sqrt_v8f32(x: f32x8) -> f32x8;
+    #[link_name = "llvm.sqrt.v16f32"]
+    fn sqrt_v16f32(x: f32x16) -> f32x16;
+    /* FIXME 64-bit single elem vectors
+    #[link_name = "llvm.sqrt.v1f64"]
+    fn sqrt_v1f64(x: f64x1) -> f64x1;
+     */
+    #[link_name = "llvm.sqrt.v2f64"]
+    fn sqrt_v2f64(x: f64x2) -> f64x2;
+    #[link_name = "llvm.sqrt.v4f64"]
+    fn sqrt_v4f64(x: f64x4) -> f64x4;
+    #[link_name = "llvm.sqrt.v8f64"]
+    fn sqrt_v8f64(x: f64x8) -> f64x8;
+
+    #[link_name = "llvm.sqrt.f32"]
+    fn sqrt_f32(x: f32) -> f32;
+    #[link_name = "llvm.sqrt.f64"]
+    fn sqrt_f64(x: f64) -> f64;
+}
+
+gen_unary_impl_table!(Sqrt, sqrt);
+
+cfg_if! {
+    if #[cfg(target_arch = "s390x")] {
+        // FIXME: https://github.com/rust-lang-nursery/packed_simd/issues/14
+        impl_unary!(f32x2[f32; 2]: sqrt_f32);
+        impl_unary!(f32x4[f32; 4]: sqrt_f32);
+        impl_unary!(f32x8[f32; 8]: sqrt_f32);
+        impl_unary!(f32x16[f32; 16]: sqrt_f32);
+
+        impl_unary!(f64x2[f64; 2]: sqrt_f64);
+        impl_unary!(f64x4[f64; 4]: sqrt_f64);
+        impl_unary!(f64x8[f64; 8]: sqrt_f64);
+    } else if #[cfg(all(target_arch = "x86_64", feature = "sleef-sys"))] {
+        use sleef_sys::*;
+        cfg_if! {
+            if #[cfg(target_feature = "avx2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_sqrtf4_avx2128);
+                impl_unary!(f32x16[h => f32x8]: Sleef_sqrtf8_avx2);
+                impl_unary!(f64x8[h => f64x4]: Sleef_sqrtd4_avx2);
+
+                impl_unary!(f32x4: Sleef_sqrtf4_avx2128);
+                impl_unary!(f32x8: Sleef_sqrtf8_avx2);
+                impl_unary!(f64x2: Sleef_sqrtd2_avx2128);
+                impl_unary!(f64x4: Sleef_sqrtd4_avx2);
+            } else if #[cfg(target_feature = "avx")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_sqrtf4_sse4);
+                impl_unary!(f32x16[h => f32x8]: Sleef_sqrtf8_avx);
+                impl_unary!(f64x8[h => f64x4]: Sleef_sqrtd4_avx);
+
+                impl_unary!(f32x4: Sleef_sqrtf4_sse4);
+                impl_unary!(f32x8: Sleef_sqrtf8_avx);
+                impl_unary!(f64x2: Sleef_sqrtd2_sse4);
+                impl_unary!(f64x4: Sleef_sqrtd4_avx);
+            } else if #[cfg(target_feature = "sse4.2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_sqrtf4_sse4);
+                impl_unary!(f32x16[q => f32x4]: Sleef_sqrtf4_sse4);
+                impl_unary!(f64x8[q => f64x2]: Sleef_sqrtd2_sse4);
+
+                impl_unary!(f32x4: Sleef_sqrtf4_sse4);
+                impl_unary!(f32x8[h => f32x4]: Sleef_sqrtf4_sse4);
+                impl_unary!(f64x2: Sleef_sqrtd2_sse4);
+                impl_unary!(f64x4[h => f64x2]: Sleef_sqrtd2_sse4);
+            } else {
+                impl_unary!(f32x2[f32; 2]: sqrt_f32);
+                impl_unary!(f32x16: sqrt_v16f32);
+                impl_unary!(f64x8: sqrt_v8f64);
+
+                impl_unary!(f32x4: sqrt_v4f32);
+                impl_unary!(f32x8: sqrt_v8f32);
+                impl_unary!(f64x2: sqrt_v2f64);
+                impl_unary!(f64x4: sqrt_v4f64);
+            }
+        }
+    } else {
+        impl_unary!(f32x2[f32; 2]: sqrt_f32);
+        impl_unary!(f32x4: sqrt_v4f32);
+        impl_unary!(f32x8: sqrt_v8f32);
+        impl_unary!(f32x16: sqrt_v16f32);
+
+        impl_unary!(f64x2: sqrt_v2f64);
+        impl_unary!(f64x4: sqrt_v4f64);
+        impl_unary!(f64x8: sqrt_v8f64);
+    }
+}
+
+
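Usage sketch (assuming `packed_simd`): `sqrt` is lane-wise and follows IEEE-754 semantics, so a negative lane yields NaN rather than a panic:

use packed_simd::f32x4;

fn main() {
    let v = f32x4::new(4.0, 9.0, 0.0, -1.0);
    let r = v.sqrt();
    assert_eq!(r.extract(0), 2.0);
    assert_eq!(r.extract(1), 3.0);
    assert!(r.extract(3).is_nan());
}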
\ No newline at end of file diff --git a/src/packed_simd/codegen/math/float/sqrte.rs.html b/src/packed_simd/codegen/math/float/sqrte.rs.html new file mode 100644 index 000000000..4d54e80fb --- /dev/null +++ b/src/packed_simd/codegen/math/float/sqrte.rs.html @@ -0,0 +1,137 @@ +sqrte.rs.html -- source
+//! Vertical floating-point `sqrte` (square-root estimate)
+#![allow(unused)]
+
+// FIXME 64-bit 1 elem vectors sqrte
+
+use crate::llvm::simd_fsqrt;
+use crate::*;
+
+crate trait Sqrte {
+    fn sqrte(self) -> Self;
+}
+
+gen_unary_impl_table!(Sqrte, sqrte);
+
+cfg_if! {
+    if #[cfg(all(target_arch = "x86_64", feature = "sleef-sys"))] {
+        use sleef_sys::*;
+        cfg_if! {
+            if #[cfg(target_feature = "avx2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_sqrtf4_u35avx2128);
+                impl_unary!(f32x16[h => f32x8]: Sleef_sqrtf8_u35avx2);
+                impl_unary!(f64x8[h => f64x4]: Sleef_sqrtd4_u35avx2);
+
+                impl_unary!(f32x4: Sleef_sqrtf4_u35avx2128);
+                impl_unary!(f32x8: Sleef_sqrtf8_u35avx2);
+                impl_unary!(f64x2: Sleef_sqrtd2_u35avx2128);
+                impl_unary!(f64x4: Sleef_sqrtd4_u35avx2);
+            } else if #[cfg(target_feature = "avx")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_sqrtf4_u35sse4);
+                impl_unary!(f32x16[h => f32x8]: Sleef_sqrtf8_u35avx);
+                impl_unary!(f64x8[h => f64x4]: Sleef_sqrtd4_u35avx);
+
+                impl_unary!(f32x4: Sleef_sqrtf4_u35sse4);
+                impl_unary!(f32x8: Sleef_sqrtf8_u35avx);
+                impl_unary!(f64x2: Sleef_sqrtd2_u35sse4);
+                impl_unary!(f64x4: Sleef_sqrtd4_u35avx);
+            } else if #[cfg(target_feature = "sse4.2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_sqrtf4_u35sse4);
+                impl_unary!(f32x16[q => f32x4]: Sleef_sqrtf4_u35sse4);
+                impl_unary!(f64x8[q => f64x2]: Sleef_sqrtd2_u35sse4);
+
+                impl_unary!(f32x4: Sleef_sqrtf4_u35sse4);
+                impl_unary!(f32x8[h => f32x4]: Sleef_sqrtf4_u35sse4);
+                impl_unary!(f64x2: Sleef_sqrtd2_u35sse4);
+                impl_unary!(f64x4[h => f64x2]: Sleef_sqrtd2_u35sse4);
+            } else {
+                impl_unary!(f32x2[g]: simd_fsqrt);
+                impl_unary!(f32x16[g]: simd_fsqrt);
+                impl_unary!(f64x8[g]: simd_fsqrt);
+
+                impl_unary!(f32x4[g]: simd_fsqrt);
+                impl_unary!(f32x8[g]: simd_fsqrt);
+                impl_unary!(f64x2[g]: simd_fsqrt);
+                impl_unary!(f64x4[g]: simd_fsqrt);
+            }
+        }
+    } else {
+        impl_unary!(f32x2[g]: simd_fsqrt);
+        impl_unary!(f32x4[g]: simd_fsqrt);
+        impl_unary!(f32x8[g]: simd_fsqrt);
+        impl_unary!(f32x16[g]: simd_fsqrt);
+
+        impl_unary!(f64x2[g]: simd_fsqrt);
+        impl_unary!(f64x4[g]: simd_fsqrt);
+        impl_unary!(f64x8[g]: simd_fsqrt);
+    }
+}
+
+
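`sqrte` trades accuracy for speed: the SLEEF `u35` kernels are specified to roughly 3.5 ulp, and the generic path lowers to `simd_fsqrt`. A hedged tolerance check (assuming `packed_simd`; the bound is deliberately loose):

use packed_simd::f32x4;

fn main() {
    let v = f32x4::new(2.0, 3.0, 5.0, 7.0);
    // Compare the estimate against the correctly rounded sqrt per lane.
    let rel_err = (v.sqrte() - v.sqrt()).abs() / v.sqrt();
    assert!(rel_err.lt(f32x4::splat(1e-3)).all());
}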
\ No newline at end of file diff --git a/src/packed_simd/codegen/math/float/tanh.rs.html b/src/packed_simd/codegen/math/float/tanh.rs.html new file mode 100644 index 000000000..f9506af4b --- /dev/null +++ b/src/packed_simd/codegen/math/float/tanh.rs.html @@ -0,0 +1,237 @@ +tanh.rs.html -- source
+//! Vertical floating-point `tanh`
+#![allow(unused)]
+
+// FIXME 64-bit 1 elem vectors tanh
+
+use crate::*;
+
+crate trait Tanh {
+    fn tanh(self) -> Self;
+}
+
+macro_rules! define_tanh {
+
+    ($name:ident, $basetype:ty, $simdtype:ty, $lanes:expr, $trait:path) => {
+        fn $name(x: $simdtype) -> $simdtype {
+            use core::intrinsics::transmute;
+            let mut buf: [$basetype; $lanes] = unsafe { transmute(x) };
+            for elem in &mut buf {
+                *elem = <$basetype as $trait>::tanh(*elem);
+            }
+            unsafe { transmute(buf) }
+        }
+    };
+
+    (f32 => $name:ident, $type:ty, $lanes:expr) => {
+        define_tanh!($name, f32, $type, $lanes, libm::F32Ext);
+    };
+
+    (f64 => $name:ident, $type:ty, $lanes:expr) => {
+        define_tanh!($name, f64, $type, $lanes, libm::F64Ext);
+    };
+}
+
+// LLVM does not seem to expose hyperbolic counterparts of its trigonometric
+// intrinsics; we therefore call the scalar `libm` implementations on each
+// lane (which stem from cmath).
+define_tanh!(f32 => tanh_v2f32, f32x2, 2);
+define_tanh!(f32 => tanh_v4f32, f32x4, 4);
+define_tanh!(f32 => tanh_v8f32, f32x8, 8);
+define_tanh!(f32 => tanh_v16f32, f32x16, 16);
+
+define_tanh!(f64 => tanh_v2f64, f64x2, 2);
+define_tanh!(f64 => tanh_v4f64, f64x4, 4);
+define_tanh!(f64 => tanh_v8f64, f64x8, 8);
+
+fn tanh_f32(x: f32) -> f32 {
+    libm::F32Ext::tanh(x)
+}
+
+fn tanh_f64(x: f64) -> f64 {
+    libm::F64Ext::tanh(x)
+}
+
+gen_unary_impl_table!(Tanh, tanh);
+
+cfg_if! {
+    if #[cfg(target_arch = "s390x")] {
+        // FIXME: https://github.com/rust-lang-nursery/packed_simd/issues/14
+        impl_unary!(f32x2[f32; 2]: tanh_f32);
+        impl_unary!(f32x4[f32; 4]: tanh_f32);
+        impl_unary!(f32x8[f32; 8]: tanh_f32);
+        impl_unary!(f32x16[f32; 16]: tanh_f32);
+
+        impl_unary!(f64x2[f64; 2]: tanh_f64);
+        impl_unary!(f64x4[f64; 4]: tanh_f64);
+        impl_unary!(f64x8[f64; 8]: tanh_f64);
+    } else if #[cfg(all(target_arch = "x86_64", feature = "sleef-sys"))] {
+        use sleef_sys::*;
+        cfg_if! {
+            if #[cfg(target_feature = "avx2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_tanhf4_u10avx2128);
+                impl_unary!(f32x16[h => f32x8]: Sleef_tanhf8_u10avx2);
+                impl_unary!(f64x8[h => f64x4]: Sleef_tanhd4_u10avx2);
+
+                impl_unary!(f32x4: Sleef_tanhf4_u10avx2128);
+                impl_unary!(f32x8: Sleef_tanhf8_u10avx2);
+                impl_unary!(f64x2: Sleef_tanhd2_u10avx2128);
+                impl_unary!(f64x4: Sleef_tanhd4_u10avx2);
+            } else if #[cfg(target_feature = "avx")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_tanhf4_u10sse4);
+                impl_unary!(f32x16[h => f32x8]: Sleef_tanhf8_u10avx);
+                impl_unary!(f64x8[h => f64x4]: Sleef_tanhd4_u10avx);
+
+                impl_unary!(f32x4: Sleef_tanhf4_u10sse4);
+                impl_unary!(f32x8: Sleef_tanhf8_u10avx);
+                impl_unary!(f64x2: Sleef_tanhd2_u10sse4);
+                impl_unary!(f64x4: Sleef_tanhd4_u10avx);
+            } else if #[cfg(target_feature = "sse4.2")] {
+                impl_unary!(f32x2[t => f32x4]: Sleef_tanhf4_u10sse4);
+                impl_unary!(f32x16[q => f32x4]: Sleef_tanhf4_u10sse4);
+                impl_unary!(f64x8[q => f64x2]: Sleef_tanhd2_u10sse4);
+
+                impl_unary!(f32x4: Sleef_tanhf4_u10sse4);
+                impl_unary!(f32x8[h => f32x4]: Sleef_tanhf4_u10sse4);
+                impl_unary!(f64x2: Sleef_tanhd2_u10sse4);
+                impl_unary!(f64x4[h => f64x2]: Sleef_tanhd2_u10sse4);
+            } else {
+                impl_unary!(f32x2[f32; 2]: tanh_f32);
+                impl_unary!(f32x16: tanh_v16f32);
+                impl_unary!(f64x8: tanh_v8f64);
+
+                impl_unary!(f32x4: tanh_v4f32);
+                impl_unary!(f32x8: tanh_v8f32);
+                impl_unary!(f64x2: tanh_v2f64);
+                impl_unary!(f64x4: tanh_v4f64);
+            }
+        }
+    } else {
+        impl_unary!(f32x2[f32; 2]: tanh_f32);
+        impl_unary!(f32x4: tanh_v4f32);
+        impl_unary!(f32x8: tanh_v8f32);
+        impl_unary!(f32x16: tanh_v16f32);
+
+        impl_unary!(f64x2: tanh_v2f64);
+        impl_unary!(f64x4: tanh_v4f64);
+        impl_unary!(f64x8: tanh_v8f64);
+    }
+}
+
+
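Usage sketch (assuming `packed_simd`): with no LLVM intrinsic available, every lane goes through a scalar `tanh`, so the vector result should agree with the scalar one up to small differences between libm variants:

use packed_simd::f32x4;

fn main() {
    let v = f32x4::new(-1.0, 0.0, 0.5, 1.0);
    let r = v.tanh();
    for i in 0..4 {
        // Each lane matches the scalar implementation up to rounding.
        assert!((r.extract(i) - v.extract(i).tanh()).abs() < 1e-6);
    }
}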
\ No newline at end of file diff --git a/src/packed_simd/codegen/pointer_sized_int.rs.html b/src/packed_simd/codegen/pointer_sized_int.rs.html new file mode 100644 index 000000000..ea08e7581 --- /dev/null +++ b/src/packed_simd/codegen/pointer_sized_int.rs.html @@ -0,0 +1,59 @@ +pointer_sized_int.rs.html -- source
+//! Provides `isize` and `usize`
+
+use cfg_if::cfg_if;
+
+cfg_if! {
+    if #[cfg(target_pointer_width = "8")] {
+        crate type isize_ = i8;
+        crate type usize_ = u8;
+    } else if #[cfg(target_pointer_width = "16")] {
+        crate type isize_ = i16;
+        crate type usize_ = u16;
+    } else if #[cfg(target_pointer_width = "32")] {
+        crate type isize_ = i32;
+        crate type usize_ = u32;
+    } else if #[cfg(target_pointer_width = "64")] {
+        crate type isize_ = i64;
+        crate type usize_ = u64;
+    } else if #[cfg(target_pointer_width = "128")] {
+        crate type isize_ = i128;
+        crate type usize_ = u128;
+    } else {
+        compile_error!("unsupported target_pointer_width");
+    }
+}
+
+
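A standalone sketch of the invariant this module maintains (the alias name below is a hypothetical stand-in for the crate-private `usize_`):

// Hypothetical mirror of the cfg_if chain above.
#[cfg(target_pointer_width = "64")]
type Usize_ = u64;
#[cfg(target_pointer_width = "32")]
type Usize_ = u32;

fn main() {
    // The alias must always have exactly the width of `usize`.
    assert_eq!(core::mem::size_of::<Usize_>(), core::mem::size_of::<usize>());
}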
\ No newline at end of file diff --git a/src/packed_simd/codegen/reductions.rs.html b/src/packed_simd/codegen/reductions.rs.html new file mode 100644 index 000000000..bf26efc64 --- /dev/null +++ b/src/packed_simd/codegen/reductions.rs.html @@ -0,0 +1,5 @@ +reductions.rs.html -- source
+crate mod mask;
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/reductions/mask.rs.html b/src/packed_simd/codegen/reductions/mask.rs.html new file mode 100644 index 000000000..0982a6703 --- /dev/null +++ b/src/packed_simd/codegen/reductions/mask.rs.html @@ -0,0 +1,141 @@ +mask.rs.html -- source
+//! Code generation workaround for the `all()` and `any()` mask horizontal
+//! reductions.
+//!
+//! Works around [LLVM bug 36702].
+//!
+//! [LLVM bug 36702]: https://bugs.llvm.org/show_bug.cgi?id=36702
+#![allow(unused_macros)]
+
+use crate::*;
+
+crate trait All: crate::marker::Sized {
+    unsafe fn all(self) -> bool;
+}
+
+crate trait Any: crate::marker::Sized {
+    unsafe fn any(self) -> bool;
+}
+
+#[macro_use]
+mod fallback_impl;
+
+cfg_if! {
+    if #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] {
+        #[macro_use]
+        mod x86;
+    } else if #[cfg(all(target_arch = "arm", target_feature = "v7",
+                        target_feature = "neon",
+                        any(feature = "core_arch", libcore_neon)))] {
+        #[macro_use]
+        mod arm;
+    } else if #[cfg(all(target_arch = "aarch64", target_feature = "neon"))] {
+        #[macro_use]
+        mod aarch64;
+    } else {
+        #[macro_use]
+        mod fallback;
+    }
+}
+
+impl_mask_reductions!(m8x2);
+impl_mask_reductions!(m8x4);
+impl_mask_reductions!(m8x8);
+impl_mask_reductions!(m8x16);
+impl_mask_reductions!(m8x32);
+impl_mask_reductions!(m8x64);
+
+impl_mask_reductions!(m16x2);
+impl_mask_reductions!(m16x4);
+impl_mask_reductions!(m16x8);
+impl_mask_reductions!(m16x16);
+impl_mask_reductions!(m16x32);
+
+impl_mask_reductions!(m32x2);
+impl_mask_reductions!(m32x4);
+impl_mask_reductions!(m32x8);
+impl_mask_reductions!(m32x16);
+
+// FIXME: 64-bit single element vector
+// impl_mask_reductions!(m64x1);
+impl_mask_reductions!(m64x2);
+impl_mask_reductions!(m64x4);
+impl_mask_reductions!(m64x8);
+
+impl_mask_reductions!(m128x1);
+impl_mask_reductions!(m128x2);
+impl_mask_reductions!(m128x4);
+
+impl_mask_reductions!(msizex2);
+impl_mask_reductions!(msizex4);
+impl_mask_reductions!(msizex8);
+
+
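Public-API sketch of the reductions these impls back (assuming `packed_simd`): `all` holds only when every lane is set, `any` when at least one is:

use packed_simd::m8x16;

fn main() {
    let m = m8x16::splat(true);
    assert!(m.all() && m.any());
    let m = m.replace(0, false);
    assert!(!m.all() && m.any());
}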
\ No newline at end of file diff --git a/src/packed_simd/codegen/reductions/mask/fallback_impl.rs.html b/src/packed_simd/codegen/reductions/mask/fallback_impl.rs.html new file mode 100644 index 000000000..6ef99680a --- /dev/null +++ b/src/packed_simd/codegen/reductions/mask/fallback_impl.rs.html @@ -0,0 +1,477 @@ +fallback_impl.rs.html -- source
+//! Default implementation of a mask reduction for any target.
+
+macro_rules! fallback_to_other_impl {
+    ($id:ident, $other:ident) => {
+        impl All for $id {
+            #[inline]
+            unsafe fn all(self) -> bool {
+                let m: $other = crate::mem::transmute(self);
+                m.all()
+            }
+        }
+        impl Any for $id {
+            #[inline]
+            unsafe fn any(self) -> bool {
+                let m: $other = crate::mem::transmute(self);
+                m.any()
+            }
+        }
+    };
+}
+
+/// Fallback implementation.
+macro_rules! fallback_impl {
+    // 16-bit wide masks:
+    (m8x2) => {
+        impl All for m8x2 {
+            #[inline]
+            unsafe fn all(self) -> bool {
+                let i: u16 = crate::mem::transmute(self);
+                i == u16::max_value()
+            }
+        }
+        impl Any for m8x2 {
+            #[inline]
+            unsafe fn any(self) -> bool {
+                let i: u16 = crate::mem::transmute(self);
+                i != 0
+            }
+        }
+    };
+    // 32-bit wide masks
+    (m8x4) => {
+        impl All for m8x4 {
+            #[inline]
+            unsafe fn all(self) -> bool {
+                let i: u32 = crate::mem::transmute(self);
+                i == u32::max_value()
+            }
+        }
+        impl Any for m8x4 {
+            #[inline]
+            unsafe fn any(self) -> bool {
+                let i: u32 = crate::mem::transmute(self);
+                i != 0
+            }
+        }
+    };
+    (m16x2) => {
+        fallback_to_other_impl!(m16x2, m8x4);
+    };
+    // 64-bit wide masks:
+    (m8x8) => {
+        impl All for m8x8 {
+            #[inline]
+            unsafe fn all(self) -> bool {
+                let i: u64 = crate::mem::transmute(self);
+                i == u64::max_value()
+            }
+        }
+        impl Any for m8x8 {
+            #[inline]
+            unsafe fn any(self) -> bool {
+                let i: u64 = crate::mem::transmute(self);
+                i != 0
+            }
+        }
+    };
+    (m16x4) => {
+        fallback_to_other_impl!(m16x4, m8x8);
+    };
+    (m32x2) => {
+        fallback_to_other_impl!(m32x2, m16x4);
+    };
+    // FIXME: 64x1 mask
+    // 128-bit wide masks:
+    (m8x16) => {
+        impl All for m8x16 {
+            #[inline]
+            unsafe fn all(self) -> bool {
+                let i: u128 = crate::mem::transmute(self);
+                i == u128::max_value()
+            }
+        }
+        impl Any for m8x16 {
+            #[inline]
+            unsafe fn any(self) -> bool {
+                let i: u128 = crate::mem::transmute(self);
+                i != 0
+            }
+        }
+    };
+    (m16x8) => {
+        fallback_to_other_impl!(m16x8, m8x16);
+    };
+    (m32x4) => {
+        fallback_to_other_impl!(m32x4, m16x8);
+    };
+    (m64x2) => {
+        fallback_to_other_impl!(m64x2, m32x4);
+    };
+    (m128x1) => {
+        fallback_to_other_impl!(m128x1, m64x2);
+    };
+    // 256-bit wide masks
+    (m8x32) => {
+        impl All for m8x32 {
+            #[inline]
+            unsafe fn all(self) -> bool {
+                let i: [u128; 2] = crate::mem::transmute(self);
+                let o: [u128; 2] = [u128::max_value(); 2];
+                i == o
+            }
+        }
+        impl Any for m8x32 {
+            #[inline]
+            unsafe fn any(self) -> bool {
+                let i: [u128; 2] = crate::mem::transmute(self);
+                let o: [u128; 2] = [0; 2];
+                i != o
+            }
+        }
+    };
+    (m16x16) => {
+        fallback_to_other_impl!(m16x16, m8x32);
+    };
+    (m32x8) => {
+        fallback_to_other_impl!(m32x8, m16x16);
+    };
+    (m64x4) => {
+        fallback_to_other_impl!(m64x4, m32x8);
+    };
+    (m128x2) => {
+        fallback_to_other_impl!(m128x2, m64x4);
+    };
+    // 512-bit wide masks
+    (m8x64) => {
+        impl All for m8x64 {
+            #[inline]
+            unsafe fn all(self) -> bool {
+                let i: [u128; 4] = crate::mem::transmute(self);
+                let o: [u128; 4] = [u128::max_value(); 4];
+                i == o
+            }
+        }
+        impl Any for m8x64 {
+            #[inline]
+            unsafe fn any(self) -> bool {
+                let i: [u128; 4] = crate::mem::transmute(self);
+                let o: [u128; 4] = [0; 4];
+                i != o
+            }
+        }
+    };
+    (m16x32) => {
+        fallback_to_other_impl!(m16x32, m8x64);
+    };
+    (m32x16) => {
+        fallback_to_other_impl!(m32x16, m16x32);
+    };
+    (m64x8) => {
+        fallback_to_other_impl!(m64x8, m32x16);
+    };
+    (m128x4) => {
+        fallback_to_other_impl!(m128x4, m64x8);
+    };
+    // Masks with pointer-sized elements
+    (msizex2) => {
+        cfg_if! {
+            if #[cfg(target_pointer_width = "64")] {
+                fallback_to_other_impl!(msizex2, m64x2);
+            } else if #[cfg(target_pointer_width = "32")] {
+                fallback_to_other_impl!(msizex2, m32x2);
+            } else {
+                compile_error!("unsupported target_pointer_width");
+            }
+        }
+    };
+    (msizex4) => {
+        cfg_if! {
+            if #[cfg(target_pointer_width = "64")] {
+                fallback_to_other_impl!(msizex4, m64x4);
+            } else if #[cfg(target_pointer_width = "32")] {
+                fallback_to_other_impl!(msizex4, m32x4);
+            } else {
+                compile_error!("unsupported target_pointer_width");
+            }
+        }
+    };
+    (msizex8) => {
+        cfg_if! {
+            if #[cfg(target_pointer_width = "64")] {
+                fallback_to_other_impl!(msizex8, m64x8);
+            } else if #[cfg(target_pointer_width = "32")] {
+                fallback_to_other_impl!(msizex8, m32x8);
+            } else {
+                compile_error!("unsupported target_pointer_width");
+            }
+        }
+    };
+}
+
+macro_rules! recurse_half {
+    ($vid:ident, $vid_h:ident) => {
+        impl All for $vid {
+            #[inline]
+            unsafe fn all(self) -> bool {
+                union U {
+                    halves: ($vid_h, $vid_h),
+                    vec: $vid,
+                }
+                let halves = U { vec: self }.halves;
+                halves.0.all() && halves.1.all()
+            }
+        }
+        impl Any for $vid {
+            #[inline]
+            unsafe fn any(self) -> bool {
+                union U {
+                    halves: ($vid_h, $vid_h),
+                    vec: $vid,
+                }
+                let halves = U { vec: self }.halves;
+                halves.0.any() || halves.1.any()
+            }
+        }
+    };
+}
+
+
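Why the integer transmute in these fallbacks is sound: a mask lane is all-ones when true and all-zeros when false, so a fully set `m8x2` reinterprets as `u16::max_value()`. The same trick in safe, standalone form (illustrative only):

fn main() {
    // Two true 8-bit lanes, reinterpreted as one u16.
    let all_true = u16::from_ne_bytes([0xff, 0xff]);
    assert_eq!(all_true, u16::max_value()); // `all()` holds
    let mixed = u16::from_ne_bytes([0xff, 0x00]);
    assert!(mixed != 0 && mixed != u16::max_value()); // `any()` but not `all()`
}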
\ No newline at end of file diff --git a/src/packed_simd/codegen/reductions/mask/x86.rs.html b/src/packed_simd/codegen/reductions/mask/x86.rs.html new file mode 100644 index 000000000..ef32d9d8a --- /dev/null +++ b/src/packed_simd/codegen/reductions/mask/x86.rs.html @@ -0,0 +1,391 @@ +x86.rs.html -- source
+//! Mask reductions implementation for `x86` and `x86_64` targets
+
+#[cfg(target_feature = "sse")]
+#[macro_use]
+mod sse;
+
+#[cfg(target_feature = "sse2")]
+#[macro_use]
+mod sse2;
+
+#[cfg(target_feature = "avx")]
+#[macro_use]
+mod avx;
+
+#[cfg(target_feature = "avx2")]
+#[macro_use]
+mod avx2;
+
+/// x86 64-bit m8x8 implementation
+macro_rules! x86_m8x8_impl {
+    ($id:ident) => {
+        cfg_if! {
+            if #[cfg(all(target_arch = "x86_64", target_feature = "sse"))] {
+                x86_m8x8_sse_impl!($id);
+            } else {
+                fallback_impl!($id);
+            }
+        }
+    };
+}
+
+/// x86 128-bit m8x16 implementation
+macro_rules! x86_m8x16_impl {
+    ($id:ident) => {
+        cfg_if! {
+            if #[cfg(target_feature = "sse2")] {
+                x86_m8x16_sse2_impl!($id);
+            } else {
+                fallback_impl!($id);
+            }
+        }
+    };
+}
+
+/// x86 128-bit m32x4 implementation
+macro_rules! x86_m32x4_impl {
+    ($id:ident) => {
+        cfg_if! {
+            if #[cfg(target_feature = "sse")] {
+                x86_m32x4_sse_impl!($id);
+            } else {
+                fallback_impl!($id);
+            }
+        }
+    };
+}
+
+/// x86 128-bit m64x2 implementation
+macro_rules! x86_m64x2_impl {
+    ($id:ident) => {
+        cfg_if! {
+            if #[cfg(target_feature = "sse2")] {
+                x86_m64x2_sse2_impl!($id);
+            } else if #[cfg(target_feature = "sse")] {
+                x86_m32x4_sse_impl!($id);
+            } else {
+                fallback_impl!($id);
+            }
+        }
+    };
+}
+
+/// x86 256-bit m8x32 implementation
+macro_rules! x86_m8x32_impl {
+    ($id:ident, $half_id:ident) => {
+        cfg_if! {
+            if #[cfg(target_feature = "avx2")] {
+                x86_m8x32_avx2_impl!($id);
+            } else if #[cfg(target_feature = "avx")] {
+                x86_m8x32_avx_impl!($id);
+            } else if #[cfg(target_feature = "sse2")] {
+                recurse_half!($id, $half_id);
+            } else {
+                fallback_impl!($id);
+            }
+        }
+    };
+}
+
+/// x86 256-bit m32x8 implementation
+macro_rules! x86_m32x8_impl {
+    ($id:ident, $half_id:ident) => {
+        cfg_if! {
+            if #[cfg(target_feature = "avx")] {
+                x86_m32x8_avx_impl!($id);
+            } else if #[cfg(target_feature = "sse")] {
+                recurse_half!($id, $half_id);
+            } else {
+                fallback_impl!($id);
+            }
+        }
+    };
+}
+
+/// x86 256-bit m64x4 implementation
+macro_rules! x86_m64x4_impl {
+    ($id:ident, $half_id:ident) => {
+        cfg_if! {
+            if #[cfg(target_feature = "avx")] {
+                x86_m64x4_avx_impl!($id);
+            } else if #[cfg(target_feature = "sse")] {
+                recurse_half!($id, $half_id);
+            } else {
+                fallback_impl!($id);
+            }
+        }
+    };
+}
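+
+// `recurse_half!` is defined elsewhere in this module tree; a minimal
+// scalar sketch of its assumed semantics: reduce each half of the wide
+// mask, then combine (`&&` for `all`, `||` for `any`).
+#[cfg(test)]
+mod recurse_half_sketch {
+    fn half_all(h: &[bool]) -> bool {
+        h.iter().all(|&b| b)
+    }
+    #[test]
+    fn wide_all_is_and_of_halves() {
+        let wide = [true, true, true, false];
+        let (lo, hi) = wide.split_at(2);
+        // Reducing the halves and AND-ing the results matches reducing
+        // the whole vector at once.
+        assert_eq!(half_all(lo) && half_all(hi), wide.iter().all(|&b| b));
+    }
+}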
+
+/// Mask reduction implementation via LLVM's `simd_reduce_{all,any}`
+/// intrinsics (used for the 128-bit element masks, which have no
+/// dedicated `x86` reduction).
+macro_rules! x86_intr_impl {
+    ($id:ident) => {
+        impl All for $id {
+            #[inline]
+            unsafe fn all(self) -> bool {
+                use crate::llvm::simd_reduce_all;
+                simd_reduce_all(self.0)
+            }
+        }
+        impl Any for $id {
+            #[inline]
+            unsafe fn any(self) -> bool {
+                use crate::llvm::simd_reduce_any;
+                simd_reduce_any(self.0)
+            }
+        }
+    };
+}
+
+/// Mask reduction implementation for `x86` and `x86_64` targets
+macro_rules! impl_mask_reductions {
+    // 64-bit wide masks
+    (m8x8) => { x86_m8x8_impl!(m8x8); };
+    (m16x4) => { x86_m8x8_impl!(m16x4); };
+    (m32x2) => { x86_m8x8_impl!(m32x2); };
+    // 128-bit wide masks
+    (m8x16) => { x86_m8x16_impl!(m8x16); };
+    (m16x8) => { x86_m8x16_impl!(m16x8); };
+    (m32x4) => { x86_m32x4_impl!(m32x4); };
+    (m64x2) => { x86_m64x2_impl!(m64x2); };
+    (m128x1) => { x86_intr_impl!(m128x1); };
+    // 256-bit wide masks:
+    (m8x32) => { x86_m8x32_impl!(m8x32, m8x16); };
+    (m16x16) => { x86_m8x32_impl!(m16x16, m16x8); };
+    (m32x8) => { x86_m32x8_impl!(m32x8, m32x4); };
+    (m64x4) => { x86_m64x4_impl!(m64x4, m64x2); };
+    (m128x2) => { x86_intr_impl!(m128x2); };
+    (msizex2) => {
+        cfg_if! {
+            if #[cfg(target_pointer_width = "64")] {
+                fallback_to_other_impl!(msizex2, m64x2);
+            } else if #[cfg(target_pointer_width = "32")] {
+                fallback_to_other_impl!(msizex2, m32x2);
+            } else {
+                compile_error!("unsupported target_pointer_width");
+            }
+        }
+    };
+    (msizex4) => {
+        cfg_if! {
+            if #[cfg(target_pointer_width = "64")] {
+                fallback_to_other_impl!(msizex4, m64x4);
+            } else if #[cfg(target_pointer_width = "32")] {
+                fallback_to_other_impl!(msizex4, m32x4);
+            } else {
+                compile_error!("unsupported target_pointer_width");
+            }
+        }
+    };
+    (msizex8) => {
+        cfg_if! {
+            if #[cfg(target_pointer_width = "64")] {
+                fallback_to_other_impl!(msizex8, m64x8);
+            } else if #[cfg(target_pointer_width = "32")] {
+                fallback_to_other_impl!(msizex8, m32x8);
+            } else {
+                compile_error!("unsupported target_pointer_width");
+            }
+        }
+    };
+
+    // Fallback to LLVM's default code-generation:
+    ($id:ident) => { fallback_impl!($id); };
+}
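+
+// The `All`/`Any` contract that every dispatch path above must satisfy,
+// sketched at the scalar level: `all` is a lane-wise AND reduction and
+// `any` a lane-wise OR reduction over the mask lanes.
+#[cfg(test)]
+mod mask_reduction_contract {
+    #[test]
+    fn and_or_reductions() {
+        let mask = [true, true, false, true];
+        assert!(!mask.iter().all(|&m| m)); // one lane is false => !all
+        assert!(mask.iter().any(|&m| m)); // at least one lane is true
+    }
+}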
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/reductions/mask/x86/sse.rs.html b/src/packed_simd/codegen/reductions/mask/x86/sse.rs.html new file mode 100644 index 000000000..c986863c7 --- /dev/null +++ b/src/packed_simd/codegen/reductions/mask/x86/sse.rs.html @@ -0,0 +1,139 @@ +sse.rs.html -- source
+
+//! Mask reductions implementation for `x86` and `x86_64` targets with `SSE`.
+#![allow(unused)]
+
+/// `x86`/`x86_64` 128-bit `m32x4` `SSE` implementation
+macro_rules! x86_m32x4_sse_impl {
+    ($id:ident) => {
+        impl All for $id {
+            #[inline]
+            #[target_feature(enable = "sse")]
+            unsafe fn all(self) -> bool {
+                #[cfg(target_arch = "x86")]
+                use crate::arch::x86::_mm_movemask_ps;
+                #[cfg(target_arch = "x86_64")]
+                use crate::arch::x86_64::_mm_movemask_ps;
+                // `_mm_movemask_ps(a)` creates a 4-bit mask containing
+                // the most significant bit of each lane of `a`. If all
+                // four bits are set, then all 4 lanes of the mask are
+                // true.
+                _mm_movemask_ps(crate::mem::transmute(self))
+                    == 0b_1111_i32
+            }
+        }
+        impl Any for $id {
+            #[inline]
+            #[target_feature(enable = "sse")]
+            unsafe fn any(self) -> bool {
+                #[cfg(target_arch = "x86")]
+                use crate::arch::x86::_mm_movemask_ps;
+                #[cfg(target_arch = "x86_64")]
+                use crate::arch::x86_64::_mm_movemask_ps;
+
+                _mm_movemask_ps(crate::mem::transmute(self)) != 0
+            }
+        }
+    };
+}
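+
+// A self-contained sketch of the movemask technique the macro above
+// relies on (guarded so it only compiles where `_mm_movemask_ps`
+// exists): a true mask lane is all-ones, so its sign bit is set, and
+// four true lanes produce the 4-bit pattern `0b1111`.
+#[cfg(all(test, target_arch = "x86_64", target_feature = "sse"))]
+mod movemask_ps_sketch {
+    #[test]
+    fn all_lanes_set() {
+        use core::arch::x86_64::{_mm_movemask_ps, _mm_set1_ps};
+        unsafe {
+            // -1.0 has the sign bit set in every lane.
+            let v = _mm_set1_ps(-1.0);
+            assert_eq!(_mm_movemask_ps(v), 0b1111);
+        }
+    }
+}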
+
+macro_rules! x86_m8x8_sse_impl {
+    ($id:ident) => {
+        impl All for $id {
+            #[inline]
+            #[target_feature(enable = "sse")]
+            unsafe fn all(self) -> bool {
+                #[cfg(target_arch = "x86")]
+                use crate::arch::x86::_mm_movemask_pi8;
+                #[cfg(target_arch = "x86_64")]
+                use crate::arch::x86_64::_mm_movemask_pi8;
+                // `_mm_movemask_pi8(a)` creates an 8-bit mask containing
+                // the most significant bit of each byte of `a`. If all
+                // bits are set, then all 8 lanes of the mask are true.
+                _mm_movemask_pi8(crate::mem::transmute(self))
+                    == u8::max_value() as i32
+            }
+        }
+        impl Any for $id {
+            #[inline]
+            #[target_feature(enable = "sse")]
+            unsafe fn any(self) -> bool {
+                #[cfg(target_arch = "x86")]
+                use crate::arch::x86::_mm_movemask_pi8;
+                #[cfg(target_arch = "x86_64")]
+                use crate::arch::x86_64::_mm_movemask_pi8;
+
+                _mm_movemask_pi8(crate::mem::transmute(self)) != 0
+            }
+        }
+    };
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/reductions/mask/x86/sse2.rs.html b/src/packed_simd/codegen/reductions/mask/x86/sse2.rs.html new file mode 100644 index 000000000..ab1ead281 --- /dev/null +++ b/src/packed_simd/codegen/reductions/mask/x86/sse2.rs.html @@ -0,0 +1,143 @@ +sse2.rs.html -- source
+
+//! Mask reductions implementation for `x86` and `x86_64` targets with `SSE2`.
+#![allow(unused)]
+
+/// `x86`/`x86_64` 128-bit `m64x2` `SSE2` implementation
+macro_rules! x86_m64x2_sse2_impl {
+    ($id:ident) => {
+        impl All for $id {
+            #[inline]
+            #[target_feature(enable = "sse")]
+            unsafe fn all(self) -> bool {
+                #[cfg(target_arch = "x86")]
+                use crate::arch::x86::_mm_movemask_pd;
+                #[cfg(target_arch = "x86_64")]
+                use crate::arch::x86_64::_mm_movemask_pd;
+                // `_mm_movemask_pd(a)` creates a 2-bit mask containing
+                // the most significant bit of each lane of `a`. If both
+                // bits are set, then both lanes of the mask are true.
+                _mm_movemask_pd(crate::mem::transmute(self))
+                    == 0b_11_i32
+            }
+        }
+        impl Any for $id {
+            #[inline]
+            #[target_feature(enable = "sse")]
+            unsafe fn any(self) -> bool {
+                #[cfg(target_arch = "x86")]
+                use crate::arch::x86::_mm_movemask_pd;
+                #[cfg(target_arch = "x86_64")]
+                use crate::arch::x86_64::_mm_movemask_pd;
+
+                _mm_movemask_pd(crate::mem::transmute(self)) != 0
+            }
+        }
+    };
+}
+
+/// `x86`/`x86_64` 128-bit m8x16 `SSE2` implementation
+macro_rules! x86_m8x16_sse2_impl {
+    ($id:ident) => {
+        impl All for $id {
+            #[inline]
+            #[target_feature(enable = "sse2")]
+            unsafe fn all(self) -> bool {
+                #[cfg(target_arch = "x86")]
+                use crate::arch::x86::_mm_movemask_epi8;
+                #[cfg(target_arch = "x86_64")]
+                use crate::arch::x86_64::_mm_movemask_epi8;
+                // `_mm_movemask_epi8(a)` creates a 16-bit mask containing
+                // the most significant bit of each byte of `a`. If all
+                // bits are set, then all 16 lanes of the mask are true.
+                _mm_movemask_epi8(crate::mem::transmute(self))
+                    == i32::from(u16::max_value())
+            }
+        }
+        impl Any for $id {
+            #[inline]
+            #[target_feature(enable = "sse2")]
+            unsafe fn any(self) -> bool {
+                #[cfg(target_arch = "x86")]
+                use crate::arch::x86::_mm_movemask_epi8;
+                #[cfg(target_arch = "x86_64")]
+                use crate::arch::x86_64::_mm_movemask_epi8;
+
+                _mm_movemask_epi8(crate::mem::transmute(self)) != 0
+            }
+        }
+    };
+}
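+
+// A hedged, standalone sketch of the SSE2 variant: `_mm_movemask_epi8`
+// packs the top bit of each of the 16 bytes into a 16-bit integer mask,
+// so "all lanes true" is equality with `0xFFFF` and "any lane true" is
+// a non-zero mask.
+#[cfg(all(test, target_arch = "x86_64", target_feature = "sse2"))]
+mod movemask_epi8_sketch {
+    #[test]
+    fn all_and_any() {
+        use core::arch::x86_64::{
+            _mm_movemask_epi8, _mm_set1_epi8, _mm_setzero_si128,
+        };
+        unsafe {
+            let t = _mm_set1_epi8(-1); // 0xFF in every byte
+            let f = _mm_setzero_si128(); // 0x00 in every byte
+            assert_eq!(_mm_movemask_epi8(t), 0xFFFF);
+            assert_eq!(_mm_movemask_epi8(f), 0);
+        }
+    }
+}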
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/shuffle.rs.html b/src/packed_simd/codegen/shuffle.rs.html new file mode 100644 index 000000000..30f6055cf --- /dev/null +++ b/src/packed_simd/codegen/shuffle.rs.html @@ -0,0 +1,303 @@ +shuffle.rs.html -- source
+
+//! Implementations of the sealed `Shuffle` trait for the different
+//! numbers of lanes and vector element types.
+
+use crate::masks::*;
+use crate::sealed::{Shuffle, Seal};
+
+macro_rules! impl_shuffle {
+    ($array:ty, $base:ty, $out:ty) => {
+        impl Seal<$array> for $base {}
+        impl Shuffle<$array> for $base {
+            type Output = $out;
+        }
+    }
+}
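+
+// A tiny self-contained model (hypothetical trait names) of the
+// association the macro encodes: the number of `u32` indices alone
+// determines the output vector type for a given element type.
+#[cfg(test)]
+mod shuffle_output_model {
+    trait ShuffleModel<Indices> {
+        type Output;
+    }
+    impl ShuffleModel<[u32; 2]> for i8 {
+        type Output = [i8; 2];
+    }
+    impl ShuffleModel<[u32; 4]> for i8 {
+        type Output = [i8; 4];
+    }
+    #[test]
+    fn output_follows_index_count() {
+        // Four indices applied to an `i8` element type select the
+        // four-lane output.
+        let out: <i8 as ShuffleModel<[u32; 4]>>::Output = [1, 2, 3, 4];
+        assert_eq!(out.len(), 4);
+    }
+}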
+
+impl_shuffle! { [u32; 2], i8, crate::codegen::i8x2 }
+impl_shuffle! { [u32; 4], i8, crate::codegen::i8x4 }
+impl_shuffle! { [u32; 8], i8, crate::codegen::i8x8 }
+impl_shuffle! { [u32; 16], i8, crate::codegen::i8x16 }
+impl_shuffle! { [u32; 32], i8, crate::codegen::i8x32 }
+impl_shuffle! { [u32; 64], i8, crate::codegen::i8x64 }
+
+impl_shuffle! { [u32; 2], u8, crate::codegen::u8x2 }
+impl_shuffle! { [u32; 4], u8, crate::codegen::u8x4 }
+impl_shuffle! { [u32; 8], u8, crate::codegen::u8x8 }
+impl_shuffle! { [u32; 16], u8, crate::codegen::u8x16 }
+impl_shuffle! { [u32; 32], u8, crate::codegen::u8x32 }
+impl_shuffle! { [u32; 64], u8, crate::codegen::u8x64 }
+
+impl_shuffle! { [u32; 2], m8, crate::codegen::m8x2 }
+impl_shuffle! { [u32; 4], m8, crate::codegen::m8x4 }
+impl_shuffle! { [u32; 8], m8, crate::codegen::m8x8 }
+impl_shuffle! { [u32; 16], m8, crate::codegen::m8x16 }
+impl_shuffle! { [u32; 32], m8, crate::codegen::m8x32 }
+impl_shuffle! { [u32; 64], m8, crate::codegen::m8x64 }
+
+impl_shuffle! { [u32; 2], i16, crate::codegen::i16x2 }
+impl_shuffle! { [u32; 4], i16, crate::codegen::i16x4 }
+impl_shuffle! { [u32; 8], i16, crate::codegen::i16x8 }
+impl_shuffle! { [u32; 16], i16, crate::codegen::i16x16 }
+impl_shuffle! { [u32; 32], i16, crate::codegen::i16x32 }
+
+impl_shuffle! { [u32; 2], u16, crate::codegen::u16x2 }
+impl_shuffle! { [u32; 4], u16, crate::codegen::u16x4 }
+impl_shuffle! { [u32; 8], u16, crate::codegen::u16x8 }
+impl_shuffle! { [u32; 16], u16, crate::codegen::u16x16 }
+impl_shuffle! { [u32; 32], u16, crate::codegen::u16x32 }
+
+impl_shuffle! { [u32; 2], m16, crate::codegen::m16x2 }
+impl_shuffle! { [u32; 4], m16, crate::codegen::m16x4 }
+impl_shuffle! { [u32; 8], m16, crate::codegen::m16x8 }
+impl_shuffle! { [u32; 16], m16, crate::codegen::m16x16 }
+
+impl_shuffle! { [u32; 2], i32, crate::codegen::i32x2 }
+impl_shuffle! { [u32; 4], i32, crate::codegen::i32x4 }
+impl_shuffle! { [u32; 8], i32, crate::codegen::i32x8 }
+impl_shuffle! { [u32; 16], i32, crate::codegen::i32x16 }
+
+impl_shuffle! { [u32; 2], u32, crate::codegen::u32x2 }
+impl_shuffle! { [u32; 4], u32, crate::codegen::u32x4 }
+impl_shuffle! { [u32; 8], u32, crate::codegen::u32x8 }
+impl_shuffle! { [u32; 16], u32, crate::codegen::u32x16 }
+
+impl_shuffle! { [u32; 2], f32, crate::codegen::f32x2 }
+impl_shuffle! { [u32; 4], f32, crate::codegen::f32x4 }
+impl_shuffle! { [u32; 8], f32, crate::codegen::f32x8 }
+impl_shuffle! { [u32; 16], f32, crate::codegen::f32x16 }
+
+impl_shuffle! { [u32; 2], m32, crate::codegen::m32x2 }
+impl_shuffle! { [u32; 4], m32, crate::codegen::m32x4 }
+impl_shuffle! { [u32; 8], m32, crate::codegen::m32x8 }
+impl_shuffle! { [u32; 16], m32, crate::codegen::m32x16 }
+
+/* FIXME: 64-bit single element vector
+impl_shuffle! { [u32; 1], i64, crate::codegen::i64x1 }
+*/
+impl_shuffle! { [u32; 2], i64, crate::codegen::i64x2 }
+impl_shuffle! { [u32; 4], i64, crate::codegen::i64x4 }
+impl_shuffle! { [u32; 8], i64, crate::codegen::i64x8 }
+
+/* FIXME: 64-bit single element vector
+impl_shuffle! { [u32; 1], u64, crate::codegen::u64x1 }
+*/
+impl_shuffle! { [u32; 2], u64, crate::codegen::u64x2 }
+impl_shuffle! { [u32; 4], u64, crate::codegen::u64x4 }
+impl_shuffle! { [u32; 8], u64, crate::codegen::u64x8 }
+
+/* FIXME: 64-bit single element vector
+impl_shuffle! { [u32; 1], f64, crate::codegen::f64x1 }
+*/
+impl_shuffle! { [u32; 2], f64, crate::codegen::f64x2 }
+impl_shuffle! { [u32; 4], f64, crate::codegen::f64x4 }
+impl_shuffle! { [u32; 8], f64, crate::codegen::f64x8 }
+
+/* FIXME: 64-bit single element vector
+impl_shuffle! { [u32; 1], m64, crate::codegen::m64x1 }
+*/
+impl_shuffle! { [u32; 2], m64, crate::codegen::m64x2 }
+impl_shuffle! { [u32; 4], m64, crate::codegen::m64x4 }
+impl_shuffle! { [u32; 8], m64, crate::codegen::m64x8 }
+
+impl_shuffle! { [u32; 2], isize, crate::codegen::isizex2 }
+impl_shuffle! { [u32; 4], isize, crate::codegen::isizex4 }
+impl_shuffle! { [u32; 8], isize, crate::codegen::isizex8 }
+
+impl_shuffle! { [u32; 2], usize, crate::codegen::usizex2 }
+impl_shuffle! { [u32; 4], usize, crate::codegen::usizex4 }
+impl_shuffle! { [u32; 8], usize, crate::codegen::usizex8 }
+
+impl_shuffle! { [u32; 2], msize, crate::codegen::msizex2 }
+impl_shuffle! { [u32; 4], msize, crate::codegen::msizex4 }
+impl_shuffle! { [u32; 8], msize, crate::codegen::msizex8 }
+
+impl<T> Seal<[u32; 2]> for *const T {}
+impl<T> Shuffle<[u32; 2]> for *const T {
+    type Output = crate::codegen::cptrx2<T>;
+}
+impl<T> Seal<[u32; 4]> for *const T {}
+impl<T> Shuffle<[u32; 4]> for *const T {
+    type Output = crate::codegen::cptrx4<T>;
+}
+impl<T> Seal<[u32; 8]> for *const T {}
+impl<T> Shuffle<[u32; 8]> for *const T {
+    type Output = crate::codegen::cptrx8<T>;
+}
+
+impl<T> Seal<[u32; 2]> for *mut T {}
+impl<T> Shuffle<[u32; 2]> for *mut T {
+    type Output = crate::codegen::mptrx2<T>;
+}
+impl<T> Seal<[u32; 4]> for *mut T {}
+impl<T> Shuffle<[u32; 4]> for *mut T {
+    type Output = crate::codegen::mptrx4<T>;
+}
+impl<T> Seal<[u32; 8]> for *mut T {}
+impl<T> Shuffle<[u32; 8]> for *mut T {
+    type Output = crate::codegen::mptrx8<T>;
+}
+
+impl_shuffle! { [u32; 1], i128, crate::codegen::i128x1 }
+impl_shuffle! { [u32; 2], i128, crate::codegen::i128x2 }
+impl_shuffle! { [u32; 4], i128, crate::codegen::i128x4 }
+
+impl_shuffle! { [u32; 1], u128, crate::codegen::u128x1 }
+impl_shuffle! { [u32; 2], u128, crate::codegen::u128x2 }
+impl_shuffle! { [u32; 4], u128, crate::codegen::u128x4 }
+
+impl_shuffle! { [u32; 1], m128, crate::codegen::m128x1 }
+impl_shuffle! { [u32; 2], m128, crate::codegen::m128x2 }
+impl_shuffle! { [u32; 4], m128, crate::codegen::m128x4 }
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/shuffle1_dyn.rs.html b/src/packed_simd/codegen/shuffle1_dyn.rs.html new file mode 100644 index 000000000..b2b7c01ec --- /dev/null +++ b/src/packed_simd/codegen/shuffle1_dyn.rs.html @@ -0,0 +1,867 @@ +shuffle1_dyn.rs.html -- source
+
+//! Shuffle vector lanes with run-time indices.
+
+use crate::*;
+
+pub trait Shuffle1Dyn {
+    type Indices;
+    fn shuffle1_dyn(self, _: Self::Indices) -> Self;
+}
+
+// Fallback implementation
+macro_rules! impl_fallback {
+    ($id:ident) => {
+        impl Shuffle1Dyn for $id {
+            type Indices = Self;
+            #[inline]
+            fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
+                let mut result = Self::splat(0);
+                for i in 0..$id::lanes() {
+                    result = result
+                        .replace(i, self.extract(indices.extract(i) as usize));
+                }
+                result
+            }
+        }
+    };
+}
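+
+// The fallback above is, lane for lane, `result[i] = self[indices[i]]`;
+// a plain-array sketch of that gather contract:
+#[cfg(test)]
+mod shuffle1_dyn_fallback_sketch {
+    #[test]
+    fn gather_semantics() {
+        let v = [10u8, 20, 30, 40];
+        let idx = [3usize, 3, 0, 1];
+        let mut out = [0u8; 4];
+        for i in 0..v.len() {
+            out[i] = v[idx[i]];
+        }
+        assert_eq!(out, [40, 40, 10, 20]);
+    }
+}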
+
+macro_rules! impl_shuffle1_dyn {
+    (u8x8) => {
+        cfg_if! {
+            if #[cfg(all(any(target_arch = "x86", target_arch = "x86_64"),
+                         target_feature = "ssse3"))] {
+                impl Shuffle1Dyn for u8x8 {
+                    type Indices = Self;
+                    #[inline]
+                    fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
+                        #[cfg(target_arch = "x86")]
+                        use crate::arch::x86::_mm_shuffle_pi8;
+                        #[cfg(target_arch = "x86_64")]
+                        use crate::arch::x86_64::_mm_shuffle_pi8;
+
+                        unsafe {
+                            crate::mem::transmute(
+                                _mm_shuffle_pi8(
+                                    crate::mem::transmute(self.0),
+                                    crate::mem::transmute(indices.0)
+                                )
+                            )
+                        }
+                    }
+                }
+            } else if #[cfg(all(
+                any(
+                    all(target_arch = "aarch64", target_feature = "neon"),
+                    all(target_arch = "arm", target_feature = "v7",
+                        target_feature = "neon")
+                ),
+                any(feature = "core_arch", libcore_neon)
+            ))] {
+                impl Shuffle1Dyn for u8x8 {
+                    type Indices = Self;
+                    #[inline]
+                    fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
+                        #[cfg(targt_arch = "aarch64")]
+                        use crate::arch::aarch64::vtbl1_u8;
+                        #[cfg(targt_arch = "arm")]
+                        use crate::arch::arm::vtbl1_u8;
+
+                        // This is safe because the binary is compiled with
+                        // neon enabled at compile-time and can therefore only
+                        // run on CPUs that have it enabled.
+                        unsafe {
+                            Simd(mem::transmute(
+                                vtbl1_u8(mem::transmute(self.0),
+                                        crate::mem::transmute(indices.0))
+                            ))
+                        }
+                    }
+                }
+            } else {
+                impl_fallback!(u8x8);
+            }
+        }
+    };
+    (u8x16) => {
+        cfg_if! {
+            if #[cfg(all(any(target_arch = "x86", target_arch = "x86_64"),
+                         target_feature = "ssse3"))] {
+                impl Shuffle1Dyn for u8x16 {
+                    type Indices = Self;
+                    #[inline]
+                    fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
+                        #[cfg(target_arch = "x86")]
+                        use crate::arch::x86::_mm_shuffle_epi8;
+                        #[cfg(target_arch = "x86_64")]
+                        use crate::arch::x86_64::_mm_shuffle_epi8;
+                        // This is safe because the binary is compiled with
+                        // ssse3 enabled at compile-time and can therefore only
+                        // run on CPUs that have it enabled.
+                        unsafe {
+                            Simd(mem::transmute(
+                                _mm_shuffle_epi8(mem::transmute(self.0),
+                                                crate::mem::transmute(indices))
+                            ))
+                        }
+                    }
+                }
+            } else if #[cfg(all(target_aarch = "aarch64", target_feature = "neon",
+                                any(feature = "core_arch", libcore_neon)))] {
+                impl Shuffle1Dyn for u8x16 {
+                    type Indices = Self;
+                    #[inline]
+                    fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
+                        use crate::arch::aarch64::vqtbl1q_u8;
+
+                        // This is safe because the binary is compiled with
+                        // neon enabled at compile-time and can therefore only
+                        // run on CPUs that have it enabled.
+                        unsafe {
+                            Simd(mem::transmute(
+                                vqtbl1q_u8(mem::transmute(self.0),
+                                          crate::mem::transmute(indices.0))
+                            ))
+                        }
+                    }
+                }
+            } else if #[cfg(all(target_aarch = "arm", target_feature = "v7",
+                                target_feature = "neon",
+                                any(feature = "core_arch", libcore_neon)))] {
+                impl Shuffle1Dyn for u8x16 {
+                    type Indices = Self;
+                    #[inline]
+                    fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
+                        use crate::arch::arm::vtbl2_u8;
+
+                        // This is safe because the binary is compiled with
+                        // neon enabled at compile-time and can therefore only
+                        // run on CPUs that have it enabled.
+                        unsafe {
+                            union U {
+                                j: u8x16,
+                                s: (u8x8, u8x8),
+                            }
+
+                            let (i0, i1) = U { j: indices }.s;
+
+                            let r0 = vtbl2_u8(
+                                mem::transmute(self.0),
+                                crate::mem::transmute(i0)
+                            );
+                            let r1 = vtbl2_u8(
+                                mem::transmute(self.0),
+                                crate::mem::transmute(i1)
+                            );
+
+                            let r = U { s: (r0, r1) }.j;
+
+                            Simd(mem::transmute(r))
+                        }
+                    }
+                }
+            } else {
+                impl_fallback!(u8x16);
+            }
+        }
+    };
+    (u16x8) => {
+        impl Shuffle1Dyn for u16x8 {
+            type Indices = Self;
+            #[inline]
+            fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
+                let indices: u8x8 = (indices * 2).cast();
+                let indices: u8x16 = shuffle!(
+                    indices, [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7]
+                );
+                let v = u8x16::new(
+                    0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1
+                );
+                let indices = indices + v;
+                unsafe {
+                    let s: u8x16 = crate::mem::transmute(self);
+                    crate::mem::transmute(s.shuffle1_dyn(indices))
+                }
+            }
+        }
+    };
+    (u32x4) => {
+        cfg_if! {
+            if #[cfg(all(any(target_arch = "x86", target_arch = "x86_64"),
+                         target_feature = "avx"))] {
+                impl Shuffle1Dyn for u32x4 {
+                    type Indices = Self;
+                    #[inline]
+                    fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
+                        #[cfg(target_arch = "x86")]
+                        use crate::arch::x86::{_mm_permutevar_ps};
+                        #[cfg(target_arch = "x86_64")]
+                        use crate::arch::x86_64::{_mm_permutevar_ps};
+
+                        unsafe {
+                            crate::mem::transmute(
+                                _mm_permutevar_ps(
+                                    crate::mem::transmute(self.0),
+                                    crate::mem::transmute(indices.0)
+                                )
+                            )
+                        }
+                    }
+                }
+            } else {
+                impl Shuffle1Dyn for u32x4 {
+                    type Indices = Self;
+                    #[inline]
+                    fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
+                        let indices: u8x4 = (indices * 4).cast();
+                        let indices: u8x16 = shuffle!(
+                            indices,
+                            [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3]
+                        );
+                        let v = u8x16::new(
+                            0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
+                        );
+                        let indices = indices + v;
+                        unsafe {
+                            let s: u8x16 = crate::mem::transmute(self);
+                            crate::mem::transmute(s.shuffle1_dyn(indices))
+                        }
+                    }
+                }
+            }
+        }
+    };
+    (u64x2) => {
+        cfg_if! {
+            if #[cfg(all(any(target_arch = "x86", target_arch = "x86_64"),
+                         target_feature = "avx"))] {
+                impl Shuffle1Dyn for u64x2 {
+                    type Indices = Self;
+                    #[inline]
+                    fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
+                        #[cfg(target_arch = "x86")]
+                        use crate::arch::x86::{_mm_permutevar_pd};
+                        #[cfg(target_arch = "x86_64")]
+                        use crate::arch::x86_64::{_mm_permutevar_pd};
+                        // _mm_permutevar_pd uses the _second_ bit of each
+                        // element to perform the selection, that is: 0b00 => 0,
+                        // 0b10 => 1:
+                        let indices = indices << 1;
+                        unsafe {
+                            crate::mem::transmute(
+                                _mm_permutevar_pd(
+                                    crate::mem::transmute(self),
+                                    crate::mem::transmute(indices)
+                                )
+                            )
+                        }
+                    }
+                }
+            } else {
+                impl Shuffle1Dyn for u64x2 {
+                    type Indices = Self;
+                    #[inline]
+                    fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
+                        let indices: u8x2 = (indices * 8).cast();
+                        let indices: u8x16 = shuffle!(
+                            indices,
+                            [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
+                        );
+                        let v = u8x16::new(
+                            0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7
+                        );
+                        let indices = indices + v;
+                        unsafe {
+                            let s: u8x16 = crate::mem::transmute(self);
+                            crate::mem::transmute(s.shuffle1_dyn(indices))
+                        }
+                    }
+                }
+            }
+        }
+    };
+    (u128x1) => {
+        impl Shuffle1Dyn for u128x1 {
+            type Indices = Self;
+            #[inline]
+            fn shuffle1_dyn(self, _indices: Self::Indices) -> Self {
+                self
+            }
+        }
+    };
+    ($id:ident) => { impl_fallback!($id); }
+}
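+
+// A scalar sketch of the index-widening trick used by the `u16x8` arm
+// above: a 16-bit lane index `k` becomes the byte-index pair
+// `(2k, 2k + 1)`, which selects exactly the little-endian byte pair of
+// lane `k` once the vector is viewed as bytes.
+#[cfg(test)]
+mod u16_index_widening_sketch {
+    #[test]
+    fn widen_indices() {
+        let lane_indices = [7u8, 0, 3, 1];
+        let mut byte_indices = [0u8; 8];
+        for (i, &k) in lane_indices.iter().enumerate() {
+            byte_indices[2 * i] = 2 * k; // low byte of lane k
+            byte_indices[2 * i + 1] = 2 * k + 1; // high byte of lane k
+        }
+        assert_eq!(byte_indices, [14, 15, 0, 1, 6, 7, 2, 3]);
+    }
+}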
+
+impl_shuffle1_dyn!(u8x2);
+impl_shuffle1_dyn!(u8x4);
+impl_shuffle1_dyn!(u8x8);
+impl_shuffle1_dyn!(u8x16);
+impl_shuffle1_dyn!(u8x32);
+impl_shuffle1_dyn!(u8x64);
+
+impl_shuffle1_dyn!(u16x2);
+impl_shuffle1_dyn!(u16x4);
+impl_shuffle1_dyn!(u16x8);
+impl_shuffle1_dyn!(u16x16);
+impl_shuffle1_dyn!(u16x32);
+
+impl_shuffle1_dyn!(u32x2);
+impl_shuffle1_dyn!(u32x4);
+impl_shuffle1_dyn!(u32x8);
+impl_shuffle1_dyn!(u32x16);
+
+impl_shuffle1_dyn!(u64x2);
+impl_shuffle1_dyn!(u64x4);
+impl_shuffle1_dyn!(u64x8);
+
+impl_shuffle1_dyn!(usizex2);
+impl_shuffle1_dyn!(usizex4);
+impl_shuffle1_dyn!(usizex8);
+
+impl_shuffle1_dyn!(u128x1);
+impl_shuffle1_dyn!(u128x2);
+impl_shuffle1_dyn!(u128x4);
+
+// Implementation for non-unsigned vector types
+macro_rules! impl_shuffle1_dyn_non_u {
+    ($id:ident, $uid:ident) => {
+        impl Shuffle1Dyn for $id {
+            type Indices = $uid;
+            #[inline]
+            fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
+                unsafe {
+                    let u: $uid = crate::mem::transmute(self);
+                    crate::mem::transmute(u.shuffle1_dyn(indices))
+                }
+            }
+        }
+    };
+}
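+
+// Why the transmute-based dispatch above is sound: reinterpreting lanes
+// as their unsigned bit patterns preserves lane order, so shuffling the
+// unsigned view and reinterpreting back shuffles the original values. A
+// scalar sketch with `f32`/`u32`:
+#[cfg(test)]
+mod transmute_dispatch_sketch {
+    #[test]
+    fn bits_round_trip() {
+        let x = [1.0f32, -2.5, 3.25, 0.0];
+        let bits = [
+            x[0].to_bits(), x[1].to_bits(), x[2].to_bits(), x[3].to_bits(),
+        ];
+        // "Shuffle" the unsigned view, then reinterpret back.
+        let shuffled = [bits[2], bits[0], bits[3], bits[1]];
+        let y = [
+            f32::from_bits(shuffled[0]),
+            f32::from_bits(shuffled[1]),
+            f32::from_bits(shuffled[2]),
+            f32::from_bits(shuffled[3]),
+        ];
+        assert_eq!(y, [3.25, 1.0, 0.0, -2.5]);
+    }
+}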
+
+impl_shuffle1_dyn_non_u!(i8x2, u8x2);
+impl_shuffle1_dyn_non_u!(i8x4, u8x4);
+impl_shuffle1_dyn_non_u!(i8x8, u8x8);
+impl_shuffle1_dyn_non_u!(i8x16, u8x16);
+impl_shuffle1_dyn_non_u!(i8x32, u8x32);
+impl_shuffle1_dyn_non_u!(i8x64, u8x64);
+
+impl_shuffle1_dyn_non_u!(i16x2, u16x2);
+impl_shuffle1_dyn_non_u!(i16x4, u16x4);
+impl_shuffle1_dyn_non_u!(i16x8, u16x8);
+impl_shuffle1_dyn_non_u!(i16x16, u16x16);
+impl_shuffle1_dyn_non_u!(i16x32, u16x32);
+
+impl_shuffle1_dyn_non_u!(i32x2, u32x2);
+impl_shuffle1_dyn_non_u!(i32x4, u32x4);
+impl_shuffle1_dyn_non_u!(i32x8, u32x8);
+impl_shuffle1_dyn_non_u!(i32x16, u32x16);
+
+impl_shuffle1_dyn_non_u!(i64x2, u64x2);
+impl_shuffle1_dyn_non_u!(i64x4, u64x4);
+impl_shuffle1_dyn_non_u!(i64x8, u64x8);
+
+impl_shuffle1_dyn_non_u!(isizex2, usizex2);
+impl_shuffle1_dyn_non_u!(isizex4, usizex4);
+impl_shuffle1_dyn_non_u!(isizex8, usizex8);
+
+impl_shuffle1_dyn_non_u!(i128x1, u128x1);
+impl_shuffle1_dyn_non_u!(i128x2, u128x2);
+impl_shuffle1_dyn_non_u!(i128x4, u128x4);
+
+impl_shuffle1_dyn_non_u!(m8x2, u8x2);
+impl_shuffle1_dyn_non_u!(m8x4, u8x4);
+impl_shuffle1_dyn_non_u!(m8x8, u8x8);
+impl_shuffle1_dyn_non_u!(m8x16, u8x16);
+impl_shuffle1_dyn_non_u!(m8x32, u8x32);
+impl_shuffle1_dyn_non_u!(m8x64, u8x64);
+
+impl_shuffle1_dyn_non_u!(m16x2, u16x2);
+impl_shuffle1_dyn_non_u!(m16x4, u16x4);
+impl_shuffle1_dyn_non_u!(m16x8, u16x8);
+impl_shuffle1_dyn_non_u!(m16x16, u16x16);
+impl_shuffle1_dyn_non_u!(m16x32, u16x32);
+
+impl_shuffle1_dyn_non_u!(m32x2, u32x2);
+impl_shuffle1_dyn_non_u!(m32x4, u32x4);
+impl_shuffle1_dyn_non_u!(m32x8, u32x8);
+impl_shuffle1_dyn_non_u!(m32x16, u32x16);
+
+impl_shuffle1_dyn_non_u!(m64x2, u64x2);
+impl_shuffle1_dyn_non_u!(m64x4, u64x4);
+impl_shuffle1_dyn_non_u!(m64x8, u64x8);
+
+impl_shuffle1_dyn_non_u!(msizex2, usizex2);
+impl_shuffle1_dyn_non_u!(msizex4, usizex4);
+impl_shuffle1_dyn_non_u!(msizex8, usizex8);
+
+impl_shuffle1_dyn_non_u!(m128x1, u128x1);
+impl_shuffle1_dyn_non_u!(m128x2, u128x2);
+impl_shuffle1_dyn_non_u!(m128x4, u128x4);
+
+impl_shuffle1_dyn_non_u!(f32x2, u32x2);
+impl_shuffle1_dyn_non_u!(f32x4, u32x4);
+impl_shuffle1_dyn_non_u!(f32x8, u32x8);
+impl_shuffle1_dyn_non_u!(f32x16, u32x16);
+
+impl_shuffle1_dyn_non_u!(f64x2, u64x2);
+impl_shuffle1_dyn_non_u!(f64x4, u64x4);
+impl_shuffle1_dyn_non_u!(f64x8, u64x8);
+
+// Implementation for pointer vector types
+macro_rules! impl_shuffle1_dyn_ptr {
+    ($id:ident, $uid:ident) => {
+        impl<T> Shuffle1Dyn for $id<T> {
+            type Indices = $uid;
+            #[inline]
+            fn shuffle1_dyn(self, indices: Self::Indices) -> Self {
+                unsafe {
+                    let u: $uid = crate::mem::transmute(self);
+                    crate::mem::transmute(u.shuffle1_dyn(indices))
+                }
+            }
+        }
+    };
+}
+
+impl_shuffle1_dyn_ptr!(cptrx2, usizex2);
+impl_shuffle1_dyn_ptr!(cptrx4, usizex4);
+impl_shuffle1_dyn_ptr!(cptrx8, usizex8);
+
+impl_shuffle1_dyn_ptr!(mptrx2, usizex2);
+impl_shuffle1_dyn_ptr!(mptrx4, usizex4);
+impl_shuffle1_dyn_ptr!(mptrx8, usizex8);
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/swap_bytes.rs.html b/src/packed_simd/codegen/swap_bytes.rs.html new file mode 100644 index 000000000..98a57b5a5 --- /dev/null +++ b/src/packed_simd/codegen/swap_bytes.rs.html @@ -0,0 +1,381 @@ +swap_bytes.rs.html -- source
+
+//! Horizontal swap bytes reductions.
+
+// FIXME: investigate using `llvm.bswap`
+// https://github.com/rust-lang-nursery/packed_simd/issues/19
+
+use crate::*;
+
+crate trait SwapBytes {
+    fn swap_bytes(self) -> Self;
+}
+
+macro_rules! impl_swap_bytes {
+    (v16: $($id:ident,)+) => {
+        $(
+            impl SwapBytes for $id {
+                #[inline]
+                fn swap_bytes(self) -> Self {
+                    unsafe { shuffle!(self, [1, 0]) }
+                }
+            }
+        )+
+    };
+    (v32: $($id:ident,)+) => {
+        $(
+            impl SwapBytes for $id {
+                #[inline]
+                #[allow(clippy::useless_transmute)]
+                fn swap_bytes(self) -> Self {
+                    unsafe {
+                        let bytes: u8x4 = crate::mem::transmute(self);
+                        let result: u8x4 = shuffle!(bytes, [3, 2, 1, 0]);
+                        crate::mem::transmute(result)
+                    }
+                }
+            }
+        )+
+    };
+    (v64: $($id:ident,)+) => {
+        $(
+            impl SwapBytes for $id {
+                #[inline]
+                #[allow(clippy::useless_transmute)]
+                fn swap_bytes(self) -> Self {
+                    unsafe {
+                        let bytes: u8x8 = crate::mem::transmute(self);
+                        let result: u8x8 = shuffle!(
+                            bytes, [7, 6, 5, 4, 3, 2, 1, 0]
+                        );
+                        crate::mem::transmute(result)
+                    }
+                }
+            }
+        )+
+    };
+    (v128: $($id:ident,)+) => {
+        $(
+            impl SwapBytes for $id {
+                #[inline]
+                #[allow(clippy::useless_transmute)]
+                fn swap_bytes(self) -> Self {
+                    unsafe {
+                        let bytes: u8x16 = crate::mem::transmute(self);
+                        let result: u8x16 = shuffle!(bytes, [
+                            15, 14, 13, 12, 11, 10, 9, 8,
+                            7, 6, 5, 4, 3, 2, 1, 0
+                        ]);
+                        crate::mem::transmute(result)
+                    }
+                }
+            }
+        )+
+    };
+    (v256: $($id:ident,)+) => {
+        $(
+            impl SwapBytes for $id {
+                #[inline]
+                #[allow(clippy::useless_transmute)]
+                fn swap_bytes(self) -> Self {
+                    unsafe {
+                        let bytes: u8x32 = crate::mem::transmute(self);
+                        let result: u8x32 = shuffle!(bytes, [
+                            31, 30, 29, 28, 27, 26, 25, 24,
+                            23, 22, 21, 20, 19, 18, 17, 16,
+                            15, 14, 13, 12, 11, 10, 9,  8,
+                            7,  6,  5,  4,  3,  2,  1,  0
+                        ]);
+                        crate::mem::transmute(result)
+                    }
+                }
+            }
+        )+
+    };
+    (v512: $($id:ident,)+) => {
+        $(
+            impl SwapBytes for $id {
+                #[inline]
+                #[allow(clippy::useless_transmute)]
+                fn swap_bytes(self) -> Self {
+                    unsafe {
+                        let bytes: u8x64 = crate::mem::transmute(self);
+                        let result: u8x64 = shuffle!(bytes, [
+                            63, 62, 61, 60, 59, 58, 57, 56,
+                            55, 54, 53, 52, 51, 50, 49, 48,
+                            47, 46, 45, 44, 43, 42, 41, 40,
+                            39, 38, 37, 36, 35, 34, 33, 32,
+                            31, 30, 29, 28, 27, 26, 25, 24,
+                            23, 22, 21, 20, 19, 18, 17, 16,
+                            15, 14, 13, 12, 11, 10, 9,  8,
+                            7,  6,  5,  4,  3,  2,  1,  0
+                        ]);
+                        crate::mem::transmute(result)
+                    }
+                }
+            }
+        )+
+    };
+}
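+
+// The byte-reversal shuffles above agree with the scalar `swap_bytes`
+// applied to each lane; a standalone check for a single `u32` lane:
+#[cfg(test)]
+mod swap_bytes_sketch {
+    #[test]
+    fn shuffle_matches_scalar() {
+        let x: u32 = 0x1122_3344;
+        let b = x.to_le_bytes();
+        // Reversing the four bytes is exactly `shuffle!(bytes, [3, 2, 1, 0])`.
+        let reversed = [b[3], b[2], b[1], b[0]];
+        assert_eq!(u32::from_le_bytes(reversed), x.swap_bytes());
+    }
+}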
+
+impl_swap_bytes!(v16: u8x2, i8x2,);
+impl_swap_bytes!(v32: u8x4, i8x4, u16x2, i16x2,);
+// FIXME: 64-bit single element vector
+impl_swap_bytes!(
+    v64: u8x8,
+    i8x8,
+    u16x4,
+    i16x4,
+    u32x2,
+    i32x2, /* u64x1, i64x1, */
+);
+
+impl_swap_bytes!(
+    v128: u8x16,
+    i8x16,
+    u16x8,
+    i16x8,
+    u32x4,
+    i32x4,
+    u64x2,
+    i64x2,
+    u128x1,
+    i128x1,
+);
+impl_swap_bytes!(
+    v256: u8x32,
+    i8x32,
+    u16x16,
+    i16x16,
+    u32x8,
+    i32x8,
+    u64x4,
+    i64x4,
+    u128x2,
+    i128x2,
+);
+
+impl_swap_bytes!(
+    v512: u8x64,
+    i8x64,
+    u16x32,
+    i16x32,
+    u32x16,
+    i32x16,
+    u64x8,
+    i64x8,
+    u128x4,
+    i128x4,
+);
+
+cfg_if! {
+    if #[cfg(target_pointer_width = "8")] {
+        impl_swap_bytes!(v16: isizex2, usizex2,);
+        impl_swap_bytes!(v32: isizex4, usizex4,);
+        impl_swap_bytes!(v64: isizex8, usizex8,);
+    } else if #[cfg(target_pointer_width = "16")] {
+        impl_swap_bytes!(v32: isizex2, usizex2,);
+        impl_swap_bytes!(v64: isizex4, usizex4,);
+        impl_swap_bytes!(v128: isizex8, usizex8,);
+    } else if #[cfg(target_pointer_width = "32")] {
+        impl_swap_bytes!(v64: isizex2, usizex2,);
+        impl_swap_bytes!(v128: isizex4, usizex4,);
+        impl_swap_bytes!(v256: isizex8, usizex8,);
+    } else if #[cfg(target_pointer_width = "64")] {
+        impl_swap_bytes!(v128: isizex2, usizex2,);
+        impl_swap_bytes!(v256: isizex4, usizex4,);
+        impl_swap_bytes!(v512: isizex8, usizex8,);
+    } else {
+        compile_error!("unsupported target_pointer_width");
+    }
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/v128.rs.html b/src/packed_simd/codegen/v128.rs.html new file mode 100644 index 000000000..5ce1b0c8d --- /dev/null +++ b/src/packed_simd/codegen/v128.rs.html @@ -0,0 +1,95 @@ +v128.rs.html -- source
+
+//! Internal 128-bit wide vector types
+
+use crate::masks::*;
+
+#[rustfmt::skip]
+impl_simd_array!(
+    [i8; 16]: i8x16 |
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8
+);
+#[rustfmt::skip]
+impl_simd_array!(
+    [u8; 16]: u8x16 |
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8
+);
+#[rustfmt::skip]
+impl_simd_array!(
+    [m8; 16]: m8x16 |
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8
+);
+
+impl_simd_array!([i16; 8]: i16x8 | i16, i16, i16, i16, i16, i16, i16, i16);
+impl_simd_array!([u16; 8]: u16x8 | u16, u16, u16, u16, u16, u16, u16, u16);
+impl_simd_array!([m16; 8]: m16x8 | i16, i16, i16, i16, i16, i16, i16, i16);
+
+impl_simd_array!([i32; 4]: i32x4 | i32, i32, i32, i32);
+impl_simd_array!([u32; 4]: u32x4 | u32, u32, u32, u32);
+impl_simd_array!([f32; 4]: f32x4 | f32, f32, f32, f32);
+impl_simd_array!([m32; 4]: m32x4 | i32, i32, i32, i32);
+
+impl_simd_array!([i64; 2]: i64x2 | i64, i64);
+impl_simd_array!([u64; 2]: u64x2 | u64, u64);
+impl_simd_array!([f64; 2]: f64x2 | f64, f64);
+impl_simd_array!([m64; 2]: m64x2 | i64, i64);
+
+impl_simd_array!([i128; 1]: i128x1 | i128);
+impl_simd_array!([u128; 1]: u128x1 | u128);
+impl_simd_array!([m128; 1]: m128x1 | i128);
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/v16.rs.html b/src/packed_simd/codegen/v16.rs.html new file mode 100644 index 000000000..ca4afc878 --- /dev/null +++ b/src/packed_simd/codegen/v16.rs.html @@ -0,0 +1,17 @@ +v16.rs.html -- source
+
+//! Internal 16-bit wide vector types
+
+use crate::masks::*;
+
+impl_simd_array!([i8; 2]: i8x2 | i8, i8);
+impl_simd_array!([u8; 2]: u8x2 | u8, u8);
+impl_simd_array!([m8; 2]: m8x2 | i8, i8);
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/v256.rs.html b/src/packed_simd/codegen/v256.rs.html new file mode 100644 index 000000000..7bdbe067b --- /dev/null +++ b/src/packed_simd/codegen/v256.rs.html @@ -0,0 +1,159 @@ +v256.rs.html -- source
+
+//! Internal 256-bit wide vector types
+
+use crate::masks::*;
+
+#[rustfmt::skip]
+impl_simd_array!(
+    [i8; 32]: i8x32 |
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8
+);
+#[rustfmt::skip]
+impl_simd_array!(
+    [u8; 32]: u8x32 |
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8
+);
+#[rustfmt::skip]
+impl_simd_array!(
+    [m8; 32]: m8x32 |
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8
+);
+#[rustfmt::skip]
+impl_simd_array!(
+    [i16; 16]: i16x16 |
+    i16, i16, i16, i16,
+    i16, i16, i16, i16,
+    i16, i16, i16, i16,
+    i16, i16, i16, i16
+);
+#[rustfmt::skip]
+impl_simd_array!(
+    [u16; 16]: u16x16 |
+    u16, u16, u16, u16,
+    u16, u16, u16, u16,
+    u16, u16, u16, u16,
+    u16, u16, u16, u16
+);
+#[rustfmt::skip]
+impl_simd_array!(
+    [m16; 16]: m16x16 |
+    i16, i16, i16, i16,
+    i16, i16, i16, i16,
+    i16, i16, i16, i16,
+    i16, i16, i16, i16
+);
+
+impl_simd_array!([i32; 8]: i32x8 | i32, i32, i32, i32, i32, i32, i32, i32);
+impl_simd_array!([u32; 8]: u32x8 | u32, u32, u32, u32, u32, u32, u32, u32);
+impl_simd_array!([f32; 8]: f32x8 | f32, f32, f32, f32, f32, f32, f32, f32);
+impl_simd_array!([m32; 8]: m32x8 | i32, i32, i32, i32, i32, i32, i32, i32);
+
+impl_simd_array!([i64; 4]: i64x4 | i64, i64, i64, i64);
+impl_simd_array!([u64; 4]: u64x4 | u64, u64, u64, u64);
+impl_simd_array!([f64; 4]: f64x4 | f64, f64, f64, f64);
+impl_simd_array!([m64; 4]: m64x4 | i64, i64, i64, i64);
+
+impl_simd_array!([i128; 2]: i128x2 | i128, i128);
+impl_simd_array!([u128; 2]: u128x2 | u128, u128);
+impl_simd_array!([m128; 2]: m128x2 | i128, i128);
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/v32.rs.html b/src/packed_simd/codegen/v32.rs.html new file mode 100644 index 000000000..1724083bf --- /dev/null +++ b/src/packed_simd/codegen/v32.rs.html @@ -0,0 +1,25 @@ +v32.rs.html -- source
+
+//! Internal 32-bit wide vector types
+
+use crate::masks::*;
+
+impl_simd_array!([i8; 4]: i8x4 | i8, i8, i8, i8);
+impl_simd_array!([u8; 4]: u8x4 | u8, u8, u8, u8);
+impl_simd_array!([m8; 4]: m8x4 | i8, i8, i8, i8);
+
+impl_simd_array!([i16; 2]: i16x2 | i16, i16);
+impl_simd_array!([u16; 2]: u16x2 | u16, u16);
+impl_simd_array!([m16; 2]: m16x2 | i16, i16);
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/v512.rs.html b/src/packed_simd/codegen/v512.rs.html new file mode 100644 index 000000000..4d06fae14 --- /dev/null +++ b/src/packed_simd/codegen/v512.rs.html @@ -0,0 +1,293 @@ +v512.rs.html -- source
+
+//! Internal 512-bit wide vector types
+
+use crate::masks::*;
+
+#[rustfmt::skip]
+impl_simd_array!(
+    [i8; 64]: i8x64 |
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8
+);
+#[rustfmt::skip]
+impl_simd_array!(
+    [u8; 64]: u8x64 |
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8,
+    u8, u8, u8, u8
+);
+#[rustfmt::skip]
+impl_simd_array!(
+    [m8; 64]: m8x64 |
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8,
+    i8, i8, i8, i8
+);
+#[rustfmt::skip]
+impl_simd_array!(
+    [i16; 32]: i16x32 |
+    i16, i16, i16, i16,
+    i16, i16, i16, i16,
+    i16, i16, i16, i16,
+    i16, i16, i16, i16,
+    i16, i16, i16, i16,
+    i16, i16, i16, i16,
+    i16, i16, i16, i16,
+    i16, i16, i16, i16
+);
+#[rustfmt::skip]
+impl_simd_array!(
+    [u16; 32]: u16x32 |
+    u16, u16, u16, u16,
+    u16, u16, u16, u16,
+    u16, u16, u16, u16,
+    u16, u16, u16, u16,
+    u16, u16, u16, u16,
+    u16, u16, u16, u16,
+    u16, u16, u16, u16,
+    u16, u16, u16, u16
+);
+#[rustfmt::skip]
+impl_simd_array!(
+    [m16; 32]: m16x32 |
+    i16, i16, i16, i16,
+    i16, i16, i16, i16,
+    i16, i16, i16, i16,
+    i16, i16, i16, i16,
+    i16, i16, i16, i16,
+    i16, i16, i16, i16,
+    i16, i16, i16, i16,
+    i16, i16, i16, i16
+);
+
+#[rustfmt::skip]
+impl_simd_array!(
+    [i32; 16]: i32x16 |
+    i32, i32, i32, i32,
+    i32, i32, i32, i32,
+    i32, i32, i32, i32,
+    i32, i32, i32, i32
+);
+#[rustfmt::skip]
+impl_simd_array!(
+    [u32; 16]: u32x16 |
+    u32, u32, u32, u32,
+    u32, u32, u32, u32,
+    u32, u32, u32, u32,
+    u32, u32, u32, u32
+);
+#[rustfmt::skip]
+impl_simd_array!(
+    [f32; 16]: f32x16 |
+    f32, f32, f32, f32,
+    f32, f32, f32, f32,
+    f32, f32, f32, f32,
+    f32, f32, f32, f32
+);
+#[rustfmt::skip]
+impl_simd_array!(
+    [m32; 16]: m32x16 |
+    i32, i32, i32, i32,
+    i32, i32, i32, i32,
+    i32, i32, i32, i32,
+    i32, i32, i32, i32
+);
+
+impl_simd_array!([i64; 8]: i64x8 | i64, i64, i64, i64, i64, i64, i64, i64);
+impl_simd_array!([u64; 8]: u64x8 | u64, u64, u64, u64, u64, u64, u64, u64);
+impl_simd_array!([f64; 8]: f64x8 | f64, f64, f64, f64, f64, f64, f64, f64);
+impl_simd_array!([m64; 8]: m64x8 | i64, i64, i64, i64, i64, i64, i64, i64);
+
+impl_simd_array!([i128; 4]: i128x4 | i128, i128, i128, i128);
+impl_simd_array!([u128; 4]: u128x4 | u128, u128, u128, u128);
+impl_simd_array!([m128; 4]: m128x4 | i128, i128, i128, i128);
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/v64.rs.html b/src/packed_simd/codegen/v64.rs.html new file mode 100644 index 000000000..f196f6723 --- /dev/null +++ b/src/packed_simd/codegen/v64.rs.html @@ -0,0 +1,45 @@ +v64.rs.html -- source
+
+//! Internal 64-bit wide vector types
+
+use crate::masks::*;
+
+impl_simd_array!([i8; 8]: i8x8 | i8, i8, i8, i8, i8, i8, i8, i8);
+impl_simd_array!([u8; 8]: u8x8 | u8, u8, u8, u8, u8, u8, u8, u8);
+impl_simd_array!([m8; 8]: m8x8 | i8, i8, i8, i8, i8, i8, i8, i8);
+
+impl_simd_array!([i16; 4]: i16x4 | i16, i16, i16, i16);
+impl_simd_array!([u16; 4]: u16x4 | u16, u16, u16, u16);
+impl_simd_array!([m16; 4]: m16x4 | i16, i16, i16, i16);
+
+impl_simd_array!([i32; 2]: i32x2 | i32, i32);
+impl_simd_array!([u32; 2]: u32x2 | u32, u32);
+impl_simd_array!([f32; 2]: f32x2 | f32, f32);
+impl_simd_array!([m32; 2]: m32x2 | i32, i32);
+
+impl_simd_array!([i64; 1]: i64x1 | i64);
+impl_simd_array!([u64; 1]: u64x1 | u64);
+impl_simd_array!([f64; 1]: f64x1 | f64);
+impl_simd_array!([m64; 1]: m64x1 | i64);
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/vPtr.rs.html b/src/packed_simd/codegen/vPtr.rs.html new file mode 100644 index 000000000..0b36e6ae2 --- /dev/null +++ b/src/packed_simd/codegen/vPtr.rs.html @@ -0,0 +1,73 @@ +vPtr.rs.html -- source
+
+//! Pointer vector types
+
+macro_rules! impl_simd_ptr {
+    ([$ptr_ty:ty; $elem_count:expr]: $tuple_id:ident | $ty:ident
+     | $($tys:ty),*) => {
+        #[derive(Copy, Clone)]
+        #[repr(simd)]
+        pub struct $tuple_id<$ty>($(crate $tys),*);
+        //^^^^^^^ leaked through SimdArray
+
+        impl<$ty> crate::sealed::Seal for [$ptr_ty; $elem_count] {}
+        impl<$ty> crate::sealed::SimdArray for [$ptr_ty; $elem_count] {
+            type Tuple = $tuple_id<$ptr_ty>;
+            type T = $ptr_ty;
+            const N: usize = $elem_count;
+            type NT = [u32; $elem_count];
+        }
+
+        impl<$ty> crate::sealed::Seal for $tuple_id<$ptr_ty> {}
+        impl<$ty> crate::sealed::Simd for $tuple_id<$ptr_ty> {
+            type Element = $ptr_ty;
+            const LANES: usize = $elem_count;
+            type LanesType = [u32; $elem_count];
+        }
+
+    }
+}
+
+impl_simd_ptr!([*const T; 2]: cptrx2 | T | T, T);
+impl_simd_ptr!([*const T; 4]: cptrx4 | T | T, T, T, T);
+impl_simd_ptr!([*const T; 8]: cptrx8 | T | T, T, T, T, T, T, T, T);
+
+impl_simd_ptr!([*mut T; 2]: mptrx2 | T | T, T);
+impl_simd_ptr!([*mut T; 4]: mptrx4 | T | T, T, T, T);
+impl_simd_ptr!([*mut T; 8]: mptrx8 | T | T, T, T, T, T, T, T, T);
+
+
\ No newline at end of file diff --git a/src/packed_simd/codegen/vSize.rs.html b/src/packed_simd/codegen/vSize.rs.html new file mode 100644 index 000000000..8d578a82c --- /dev/null +++ b/src/packed_simd/codegen/vSize.rs.html @@ -0,0 +1,89 @@ +vSize.rs.html -- source
+
+//! Vector types with pointer-sized elements
+
+use crate::codegen::pointer_sized_int::{isize_, usize_};
+use crate::masks::*;
+
+impl_simd_array!([isize; 2]: isizex2 | isize_, isize_);
+impl_simd_array!([usize; 2]: usizex2 | usize_, usize_);
+impl_simd_array!([msize; 2]: msizex2 | isize_, isize_);
+
+impl_simd_array!([isize; 4]: isizex4 | isize_, isize_, isize_, isize_);
+impl_simd_array!([usize; 4]: usizex4 | usize_, usize_, usize_, usize_);
+impl_simd_array!([msize; 4]: msizex4 | isize_, isize_, isize_, isize_);
+
+impl_simd_array!(
+    [isize; 8]: isizex8 | isize_,
+    isize_,
+    isize_,
+    isize_,
+    isize_,
+    isize_,
+    isize_,
+    isize_
+);
+impl_simd_array!(
+    [usize; 8]: usizex8 | usize_,
+    usize_,
+    usize_,
+    usize_,
+    usize_,
+    usize_,
+    usize_,
+    usize_
+);
+impl_simd_array!(
+    [msize; 8]: msizex8 | isize_,
+    isize_,
+    isize_,
+    isize_,
+    isize_,
+    isize_,
+    isize_,
+    isize_
+);
+
+
\ No newline at end of file diff --git a/src/packed_simd/lib.rs.html b/src/packed_simd/lib.rs.html new file mode 100644 index 000000000..5c375bf5d --- /dev/null +++ b/src/packed_simd/lib.rs.html @@ -0,0 +1,663 @@ +lib.rs.html -- source
+
+//! # Portable packed SIMD vectors
+//!
+//! This crate is proposed for stabilization as `std::packed_simd` in
+//! [RFC 2366: `std::simd`](https://github.com/rust-lang/rfcs/pull/2366).
+//!
+//! The examples available in the
+//! [`examples/`](https://github.com/rust-lang-nursery/packed_simd/tree/master/examples)
+//! sub-directory of the crate showcase how to use the library in practice.
+//!
+//! ## Table of contents
+//!
+//! - [Introduction](#introduction)
+//! - [Vector types](#vector-types)
+//! - [Basic operations](#basic-operations)
+//! - [Conditional operations](#conditional-operations)
+//! - [Conversions](#conversions)
+//! - [Performance guide](https://rust-lang-nursery.github.io/packed_simd/perf-guide/)
+//!
+//! ## Introduction
+//!
+//! This crate exports [`Simd<[T; N]>`][`Simd`]: a packed vector of `N`
+//! elements of type `T`, as well as many type aliases for this type: for
+//! example [`f32x4`], which is just an alias for `Simd<[f32; 4]>`.
+//!
+//! The operations on packed vectors are, by default, "vertical", that is, they
+//! are applied to each vector lane in isolation from the others:
+//!
+//! ```
+//! # use packed_simd::*;
+//! let a = i32x4::new(1, 2, 3, 4);
+//! let b = i32x4::new(5, 6, 7, 8);
+//! assert_eq!(a + b, i32x4::new(6, 8, 10, 12));
+//! ```
+//!
+//! Many "horizontal" operations are also provided:
+//!
+//! ```
+//! # use packed_simd::*;
+//! # let a = i32x4::new(1, 2, 3, 4);
+//! assert_eq!(a.wrapping_sum(), 10);
+//! ```
+//!
+//! In virtually all architectures, vertical operations are fast, while
+//! horizontal operations are, by comparison, much slower. Therefore, the
+//! most portably-efficient way of performing a reduction over a slice
+//! is to collect the results into a vector using vertical operations,
+//! and to perform a single horizontal operation at the end:
+//!
+//! ```
+//! # use packed_simd::*;
+//! fn reduce(x: &[i32]) -> i32 {
+//!     assert!(x.len() % 4 == 0);
+//!     let mut sum = i32x4::splat(0); // [0, 0, 0, 0]
+//!     for i in (0..x.len()).step_by(4) {
+//!         sum += i32x4::from_slice_unaligned(&x[i..]);
+//!     }
+//!     sum.wrapping_sum()
+//! }
+//!
+//! let x = [0, 1, 2, 3, 4, 5, 6, 7];
+//! assert_eq!(reduce(&x), 28);
+//! ```
+//!
+//! ## Vector types
+//!
+//! The vector type aliases are named according to the following scheme:
+//!
+//! > `{element_type}x{number_of_lanes} == Simd<[element_type; number_of_lanes]>`
+//!
+//! where the following element types are supported:
+//!
+//! * `i{element_width}`: signed integer
+//! * `u{element_width}`: unsigned integer
+//! * `f{element_width}`: float
+//! * `m{element_width}`: mask (see below)
+//! * `*{const,mut} T`: `const` and `mut` pointers
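+//!
+//! For a quick illustration, an alias and the `Simd` type it denotes are
+//! interchangeable:
+//!
+//! ```
+//! # use packed_simd::*;
+//! let x: f32x4 = Simd::<[f32; 4]>::new(0., 1., 2., 3.);
+//! assert_eq!(x.extract(3), 3.);
+//! ```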
+//!
+//! ## Basic operations
+//!
+//! ```
+//! # use packed_simd::*;
+//! // Sets all elements to `0`:
+//! let a = i32x4::splat(0);
+//!
+//! // Reads a vector from a slice:
+//! let mut arr = [0, 0, 0, 1, 2, 3, 4, 5];
+//! let b = i32x4::from_slice_unaligned(&arr);
+//!
+//! // Reads the 4-th element of a vector:
+//! assert_eq!(b.extract(3), 1);
+//!
+//! // Returns a new vector where the 4-th element is replaced with `1`:
+//! let a = a.replace(3, 1);
+//! assert_eq!(a, b);
+//!
+//! // Writes a vector to a slice:
+//! let a = a.replace(2, 1);
+//! a.write_to_slice_unaligned(&mut arr[4..]);
+//! assert_eq!(arr, [0, 0, 0, 1, 0, 0, 1, 1]);
+//! ```
+//!
+//! ## Conditional operations
+//!
+//! One often needs to perform an operation only on some lanes of a vector.
+//! Vector masks, like `m32x4`, allow selecting the vector lanes on which an
+//! operation is to be performed:
+//!
+//! ```
+//! # use packed_simd::*;
+//! let a = i32x4::new(1, 1, 2, 2);
+//!
+//! // Add `1` to the first two lanes of the vector.
+//! let m = m16x4::new(true, true, false, false);
+//! let a = m.select(a + 1, a);
+//! assert_eq!(a, i32x4::splat(2));
+//! ```
+//!
+//! The elements of a vector mask are either `true` or `false`. Here `true`
+//! means that a lane is "selected", while `false` means that a lane is not
+//! selected.
+//!
+//! All vector masks implement a `mask.select(a: T, b: T) -> T` method that
+//! works on all vectors that have the same number of lanes as the mask. The
+//! resulting vector contains the elements of `a` for those lanes for which the
+//! mask is `true`, and the elements of `b` otherwise.
+//!
+//! The example constructs a mask with the first two lanes set to `true` and
+//! the last two lanes set to `false`. This selects the first two lanes of `a +
+//! 1` and the last two lanes of `a`, producing a vector where the first two
+//! lanes have been incremented by `1`.
+//!
+//! > note: mask `select` can be used on vector types that have the same
+//! > number of lanes as the mask. The example shows this by using [`m16x4`]
+//! > instead of [`m32x4`]. It is _typically_ more performant to use a mask
+//! > element width equal to the element width of the vectors being operated
+//! > upon. This is, however, not true for 512-bit wide vectors when
+//! > targeting AVX-512, where the most efficient masks use only 1 bit per
+//! > element.
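+//!
+//! A sketch of the same selection using the matching-width mask type
+//! [`m32x4`]:
+//!
+//! ```
+//! # use packed_simd::*;
+//! let a = i32x4::new(1, 1, 2, 2);
+//! let m = m32x4::new(true, true, false, false);
+//! assert_eq!(m.select(a + 1, a), i32x4::splat(2));
+//! ```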
+//!
+//! All vertical comparison operations return masks:
+//!
+//! ```
+//! # use packed_simd::*;
+//! let a = i32x4::new(1, 1, 3, 3);
+//! let b = i32x4::new(2, 2, 0, 0);
+//!
+//! // ge: >= (Greater or Equal; see also lt, le, gt, eq, ne).
+//! let m = a.ge(i32x4::splat(2));
+//!
+//! if m.any() {
+//!     // all / any / none allow coherent control flow
+//!     let d = m.select(a, b);
+//!     assert_eq!(d, i32x4::new(2, 2, 3, 3));
+//! }
+//! ```
+//!
+//! ## Conversions
+//!
+//! * **lossless widening conversions**: [`From`]/[`Into`] are implemented
+//!   for vectors with the same number of lanes when the conversion is value
+//!   preserving (same as in `std`); see the sketch after this list.
+//!
+//! * **safe bitwise conversions**: The cargo feature `into_bits` provides
+//!   the `IntoBits`/`FromBits` traits (`x.into_bits()`). These perform safe
+//!   bitwise `transmute`s when all bit patterns of the source type are valid
+//!   bit patterns of the target type, and are also implemented for the
+//!   architecture-specific vector types of `std::arch`. For example, `let x:
+//!   u8x8 = m8x8::splat(true).into_bits();` is provided because all `m8x8`
+//!   bit patterns are valid `u8x8` bit patterns. However, the opposite is
+//!   not true: not all `u8x8` bit patterns are valid `m8x8` bit patterns, so
+//!   this operation cannot be performed safely using `x.into_bits()`; one
+//!   needs to use `unsafe { crate::mem::transmute(x) }` for that, making
+//!   sure that the value in the `u8x8` is a valid bit pattern of `m8x8`.
+//!
+//! * **numeric casts** (`as`): are performed using [`FromCast`]/[`Cast`]
+//!   (`x.cast()`), just like `as`:
+//!
+//!   * casting integer vectors whose lane types have the same size (e.g.
+//!     `i32xN` -> `u32xN`) is a **no-op**,
+//!
+//!   * casting from a larger integer to a smaller integer (e.g. `u32xN` ->
+//!     `u8xN`) will **truncate**,
+//!
+//!   * casting from a smaller integer to a larger integer (e.g. `u8xN` ->
+//!     `u32xN`) will:
+//!        * **zero-extend** if the source is unsigned, or
+//!        * **sign-extend** if the source is signed,
+//!
+//!   * casting from a float to an integer will **round the float towards
+//!     zero**,
+//!
+//!   * casting from an integer to a float will produce the floating-point
+//!     representation of the integer, **rounding to nearest, ties to even**,
+//!
+//!   * casting from an `f32` to an `f64` is perfect and lossless,
+//!
+//!   * casting from an `f64` to an `f32` **rounds to nearest, ties to even**.
+//!
+//!   In short, numeric casts are not uniformly "precise": some are lossy and
+//!   others value preserving; the sketch below illustrates both kinds.
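+//!
+//! A minimal sketch of the `From` and `Cast` conversions described above
+//! (the `into_bits` conversions additionally require the `into_bits` cargo
+//! feature):
+//!
+//! ```
+//! # use packed_simd::*;
+//! // Lossless widening via `From`/`Into`: `i8x8 -> i16x8` sign-extends.
+//! let a = i8x8::new(-1, 2, 3, 4, 5, 6, 7, 8);
+//! let b: i16x8 = a.into();
+//! assert_eq!(b.extract(0), -1);
+//!
+//! // Numeric cast via `Cast`, with `as` semantics:
+//! // float -> int rounds towards zero.
+//! let f = f32x4::new(1.9, -1.9, 2.5, -2.5);
+//! let i: i32x4 = f.cast();
+//! assert_eq!(i, i32x4::new(1, -1, 2, -2));
+//! ```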
+
+#![feature(
+    repr_simd,
+    const_fn,
+    platform_intrinsics,
+    stdsimd,
+    aarch64_target_feature,
+    arm_target_feature,
+    link_llvm_intrinsics,
+    core_intrinsics,
+    stmt_expr_attributes,
+    mmx_target_feature,
+    crate_visibility_modifier,
+    custom_inner_attributes
+)]
+#![allow(non_camel_case_types, non_snake_case,
+         clippy::cast_possible_truncation,
+         clippy::cast_lossless,
+         clippy::cast_possible_wrap,
+         clippy::cast_precision_loss,
+         // This lint is currently broken for generic code
+         // See https://github.com/rust-lang/rust-clippy/issues/3410
+         clippy::use_self
+)]
+#![cfg_attr(test, feature(hashmap_internals))]
+#![deny(rust_2018_idioms, clippy::missing_inline_in_public_items)]
+#![no_std]
+
+use cfg_if::cfg_if;
+
+cfg_if! {
+    if #[cfg(feature = "core_arch")] {
+        #[allow(unused_imports)]
+        use core_arch as arch;
+    } else {
+        #[allow(unused_imports)]
+        use core::arch;
+    }
+}
+
+#[cfg(all(target_arch = "wasm32", test))]
+use wasm_bindgen_test::*;
+
+#[allow(unused_imports)]
+use core::{
+    /* arch (handled above), */ cmp, f32, f64, fmt, hash, hint, i128,
+    i16, i32, i64, i8, intrinsics, isize, iter, marker, mem, ops, ptr, slice,
+    u128, u16, u32, u64, u8, usize,
+};
+
+#[macro_use]
+mod testing;
+#[macro_use]
+mod api;
+mod codegen;
+mod sealed;
+
+pub use crate::sealed::{Simd as SimdVector, Shuffle, SimdArray, Mask};
+
+/// Packed SIMD vector type.
+///
+/// # Examples
+///
+/// ```
+/// # use packed_simd::Simd;
+/// let v = Simd::<[i32; 4]>::new(0, 1, 2, 3);
+/// assert_eq!(v.extract(2), 2);
+/// ```
+#[repr(transparent)]
+#[derive(Copy, Clone)]
+pub struct Simd<A: sealed::SimdArray>(
+    // FIXME: this type should be private,
+    // but it currently must be public for the
+    // `shuffle!` macro to work: it needs to
+    // access the internal `repr(simd)` type
+    // to call the shuffle intrinsics.
+    #[doc(hidden)] pub <A as sealed::SimdArray>::Tuple,
+);
+
+impl<A: sealed::SimdArray> sealed::Seal for Simd<A> {}
+
+/// Wrapper over `T` implementing a lexicographical order via the
+/// `PartialOrd` and/or `Ord` traits.
+#[repr(transparent)]
+#[derive(Copy, Clone, Debug)]
+#[allow(clippy::missing_inline_in_public_items)]
+pub struct LexicographicallyOrdered<T>(T);
+
+mod masks;
+pub use self::masks::*;
+
+mod v16;
+pub use self::v16::*;
+
+mod v32;
+pub use self::v32::*;
+
+mod v64;
+pub use self::v64::*;
+
+mod v128;
+pub use self::v128::*;
+
+mod v256;
+pub use self::v256::*;
+
+mod v512;
+pub use self::v512::*;
+
+mod vSize;
+pub use self::vSize::*;
+
+mod vPtr;
+pub use self::vPtr::*;
+
+pub use self::api::cast::*;
+
+#[cfg(feature = "into_bits")]
+pub use self::api::into_bits::*;
+
+// Re-export the shuffle intrinsics required by the `shuffle!` macro.
+#[doc(hidden)]
+pub use self::codegen::llvm::{
+    __shuffle_vector16, __shuffle_vector2, __shuffle_vector32,
+    __shuffle_vector4, __shuffle_vector64, __shuffle_vector8,
+};
+
+crate mod llvm {
+    crate use crate::codegen::llvm::*;
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/masks.rs.html b/src/packed_simd/masks.rs.html new file mode 100644 index 000000000..edec6a6be --- /dev/null +++ b/src/packed_simd/masks.rs.html @@ -0,0 +1,261 @@ +masks.rs.html -- source
+//! Mask types
+
+macro_rules! impl_mask_ty {
+    ($id:ident : $elem_ty:ident | #[$doc:meta]) => {
+        #[$doc]
+        #[derive(Copy, Clone)]
+        pub struct $id($elem_ty);
+
+        impl crate::sealed::Seal for $id {}
+        impl crate::sealed::Mask for $id {
+            fn test(&self) -> bool {
+                $id::test(self)
+            }
+        }
+
+        impl $id {
+            /// Instantiate a mask with the value `x`.
+            #[inline]
+            pub fn new(x: bool) -> Self {
+                if x {
+                    $id(!0)
+                } else {
+                    $id(0)
+                }
+            }
+            /// Test whether the mask is set.
+            #[inline]
+            pub fn test(&self) -> bool {
+                self.0 != 0
+            }
+        }
+
+        impl Default for $id {
+            #[inline]
+            fn default() -> Self {
+                $id(0)
+            }
+        }
+
+        #[allow(clippy::partialeq_ne_impl)]
+        impl PartialEq<$id> for $id {
+            #[inline]
+            fn eq(&self, other: &Self) -> bool {
+                self.0 == other.0
+            }
+            #[inline]
+            fn ne(&self, other: &Self) -> bool {
+                self.0 != other.0
+            }
+        }
+
+        impl Eq for $id {}
+
+        impl PartialOrd<$id> for $id {
+            #[inline]
+            fn partial_cmp(
+                &self, other: &Self,
+            ) -> Option<crate::cmp::Ordering> {
+                use crate::cmp::Ordering;
+                if self == other {
+                    Some(Ordering::Equal)
+                } else if self.0 > other.0 {
+                    // Note:
+                    //  * false == 0_i
+                    //  * true == !0_i == -1_i
+                    // so the integer order is the reverse of the
+                    // boolean order (hence `false < true` holds).
+                    Some(Ordering::Less)
+                } else {
+                    Some(Ordering::Greater)
+                }
+            }
+
+            #[inline]
+            fn lt(&self, other: &Self) -> bool {
+                self.0 > other.0
+            }
+            #[inline]
+            fn gt(&self, other: &Self) -> bool {
+                self.0 < other.0
+            }
+            #[inline]
+            fn le(&self, other: &Self) -> bool {
+                self.0 >= other.0
+            }
+            #[inline]
+            fn ge(&self, other: &Self) -> bool {
+                self.0 <= other.0
+            }
+        }
+
+        impl Ord for $id {
+            #[inline]
+            fn cmp(&self, other: &Self) -> crate::cmp::Ordering {
+                match self.partial_cmp(other) {
+                    Some(x) => x,
+                    None => unsafe { crate::hint::unreachable_unchecked() },
+                }
+            }
+        }
+
+        impl crate::hash::Hash for $id {
+            #[inline]
+            fn hash<H: crate::hash::Hasher>(&self, state: &mut H) {
+                (self.0 != 0).hash(state);
+            }
+        }
+
+        impl crate::fmt::Debug for $id {
+            #[inline]
+            fn fmt(
+                &self, f: &mut crate::fmt::Formatter<'_>,
+            ) -> Result<(), crate::fmt::Error> {
+                write!(f, "{}({})", stringify!($id), self.0 != 0)
+            }
+        }
+    };
+}
+
+impl_mask_ty!(m8: i8 | /// 8-bit wide mask.
+);
+impl_mask_ty!(m16: i16 | /// 16-bit wide mask.
+);
+impl_mask_ty!(m32: i32 | /// 32-bit wide mask.
+);
+impl_mask_ty!(m64: i64 | /// 64-bit wide mask.
+);
+impl_mask_ty!(m128: i128 | /// 128-bit wide mask.
+);
+impl_mask_ty!(msize: isize | /// isize-wide mask.
+);
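+
+// A minimal sketch (hypothetical test module, not part of the original
+// source) showing that, despite the inverted integer representation noted
+// in `partial_cmp` above, masks compare like `bool`:
+#[cfg(test)]
+mod mask_ordering_sketch {
+    use super::*;
+
+    #[test]
+    fn bool_like_ordering() {
+        assert!(m8::new(false) < m8::new(true));
+        assert!(m8::new(true).test());
+        assert_eq!(m8::new(true), m8::new(true));
+    }
+}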
+
+
\ No newline at end of file diff --git a/src/packed_simd/sealed.rs.html b/src/packed_simd/sealed.rs.html new file mode 100644 index 000000000..20e0fb2f7 --- /dev/null +++ b/src/packed_simd/sealed.rs.html @@ -0,0 +1,87 @@ +sealed.rs.html -- source
+//! Sealed traits
+
+/// A sealed trait: this is logically private to the crate
+/// and prevents implementations from outside the crate.
+pub trait Seal<T = ()> {}
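+
+// Sealing works because the `sealed` module itself is private in `lib.rs`:
+// downstream crates can name the re-exported traits (e.g. `SimdArray`) in
+// bounds, but cannot name `sealed::Seal` in order to implement it, so the
+// traits below cannot be implemented outside this crate.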
+
+/// Trait implemented by arrays that can be SIMD types.
+pub trait SimdArray: Seal {
+    /// The corresponding `#[repr(simd)]` tuple type.
+    type Tuple: Copy + Clone;
+    /// The element type of the vector.
+    type T;
+    /// The number of elements in the array.
+    const N: usize;
+    /// The type: `[u32; Self::N]`.
+    type NT;
+}
+
+/// This trait is used to constrain the argument
+/// and result types of the portable shuffles.
+#[doc(hidden)]
+pub trait Shuffle<Lanes>: Seal<Lanes> {
+    // Lanes is a `[u32; N]` where `N` is the number of vector lanes
+
+    /// The result type of the shuffle.
+    type Output;
+}
+
+/// This trait is implemented by all SIMD vector types.
+pub trait Simd: Seal {
+    /// The element type of the SIMD vector.
+    type Element;
+    /// The number of elements in the SIMD vector.
+    const LANES: usize;
+    /// The type: `[u32; Self::LANES]`.
+    type LanesType;
+}
+
+/// This trait is implemented by all mask types.
+pub trait Mask: Seal {
+    fn test(&self) -> bool;
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/testing.rs.html b/src/packed_simd/testing.rs.html new file mode 100644 index 000000000..be396e787 --- /dev/null +++ b/src/packed_simd/testing.rs.html @@ -0,0 +1,19 @@ +testing.rs.html -- source
+//! Testing macros and other utilities.
+
+#[macro_use]
+mod macros;
+
+#[cfg(test)]
+#[macro_use]
+crate mod utils;
+
+
\ No newline at end of file diff --git a/src/packed_simd/testing/macros.rs.html b/src/packed_simd/testing/macros.rs.html new file mode 100644 index 000000000..452e741d3 --- /dev/null +++ b/src/packed_simd/testing/macros.rs.html @@ -0,0 +1,91 @@ +macros.rs.html -- source
+//! Testing macros
+
+macro_rules! test_if {
+    ($cfg_tt:tt: $it:item) => {
+        #[cfg(any(
+            // Test everything if:
+            //
+            // * tests are enabled,
+            // * no features about exclusively testing
+            //   specific vector classes are enabled
+            all(test, not(any(
+                test_v16,
+                test_v32,
+                test_v64,
+                test_v128,
+                test_v256,
+                test_v512,
+                test_none, // disables all tests
+            ))),
+            // Test if:
+            //
+            // * tests are enabled
+            // * a particular cfg token tree returns true
+            all(test, $cfg_tt),
+        ))]
+        $it
+    };
+}
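+
+// Hypothetical usage sketch (not from the original source):
+//
+//     test_if!(test_v128: mod v128_only_tests { /* ... */ });
+//
+// The gated item is compiled when tests run with `--cfg test_v128`, or when
+// no `test_v*`/`test_none` cfg is set at all (i.e. everything is tested).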
+
+#[cfg(test)]
+#[allow(unused)]
+macro_rules! ref_ {
+    ($anything:tt) => {
+        &$anything
+    };
+}
+
+#[cfg(test)]
+#[allow(unused)]
+macro_rules! ref_mut_ {
+    ($anything:tt) => {
+        &mut $anything
+    };
+}
+
+
\ No newline at end of file diff --git a/src/packed_simd/v128.rs.html b/src/packed_simd/v128.rs.html new file mode 100644 index 000000000..492dc771a --- /dev/null +++ b/src/packed_simd/v128.rs.html @@ -0,0 +1,163 @@ +v128.rs.html -- source
+//! 128-bit wide vector types
+#![rustfmt::skip]
+
+use crate::*;
+
+impl_i!([i8; 16]: i8x16, m8x16 | i8, u16 | test_v128 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 |
+        From: |
+        /// A 128-bit vector with 16 `i8` lanes.
+);
+impl_u!([u8; 16]: u8x16, m8x16 | u8, u16 | test_v128 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 |
+        From: |
+        /// A 128-bit vector with 16 `u8` lanes.
+);
+impl_m!([m8; 16]: m8x16 | i8, u16 | test_v128 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 |
+        From: m16x16 |
+        /// A 128-bit vector mask with 16 `m8` lanes.
+);
+
+impl_i!([i16; 8]: i16x8, m16x8 | i16, u8 | test_v128 | x0, x1, x2, x3, x4, x5, x6, x7 |
+        From: i8x8, u8x8 |
+        /// A 128-bit vector with 8 `i16` lanes.
+);
+impl_u!([u16; 8]: u16x8, m16x8 | u16, u8 | test_v128 | x0, x1, x2, x3, x4, x5, x6, x7 |
+        From: u8x8 |
+        /// A 128-bit vector with 8 `u16` lanes.
+);
+impl_m!([m16; 8]: m16x8 | i16, u8 | test_v128 | x0, x1, x2, x3, x4, x5, x6, x7 |
+        From: m8x8, m32x8 |
+        /// A 128-bit vector mask with 8 `m16` lanes.
+);
+
+impl_i!([i32; 4]: i32x4, m32x4 | i32, u8 | test_v128 | x0, x1, x2, x3 |
+        From: i8x4, u8x4, i16x4, u16x4  |
+        /// A 128-bit vector with 4 `i32` lanes.
+);
+impl_u!([u32; 4]: u32x4, m32x4 | u32, u8 | test_v128 | x0, x1, x2, x3 |
+        From: u8x4, u16x4 |
+        /// A 128-bit vector with 4 `u32` lanes.
+);
+impl_f!([f32; 4]: f32x4, m32x4 | f32 | test_v128 | x0, x1, x2, x3 |
+        From: i8x4, u8x4, i16x4, u16x4 |
+        /// A 128-bit vector with 4 `f32` lanes.
+);
+impl_m!([m32; 4]: m32x4 | i32, u8 | test_v128 | x0, x1, x2, x3 |
+        From: m8x4, m16x4, m64x4 |
+        /// A 128-bit vector mask with 4 `m32` lanes.
+);
+
+impl_i!([i64; 2]: i64x2, m64x2 | i64, u8 | test_v128 | x0, x1 |
+        From: i8x2, u8x2, i16x2, u16x2, i32x2, u32x2 |
+        /// A 128-bit vector with 2 `i64` lanes.
+);
+impl_u!([u64; 2]: u64x2, m64x2 | u64, u8 | test_v128 | x0, x1 |
+        From: u8x2, u16x2, u32x2 |
+        /// A 128-bit vector with 2 `u64` lanes.
+);
+impl_f!([f64; 2]: f64x2, m64x2 | f64 | test_v128 | x0, x1 |
+        From: i8x2, u8x2, i16x2, u16x2, i32x2, u32x2, f32x2 |
+        /// A 128-bit vector with 2 `f64` lanes.
+);
+impl_m!([m64; 2]: m64x2 | i64, u8 | test_v128 | x0, x1 |
+        From: m8x2, m16x2, m32x2, m128x2 |
+        /// A 128-bit vector mask with 2 `m64` lanes.
+);
+
+impl_i!([i128; 1]: i128x1, m128x1 | i128, u8 | test_v128 | x0 |
+        From: /*i8x1, u8x1, i16x1, u16x1, i32x1, u32x1, i64x1, u64x1 */ | // FIXME: unary small vector types
+        /// A 128-bit vector with 1 `i128` lane.
+);
+impl_u!([u128; 1]: u128x1, m128x1 | u128, u8 | test_v128 | x0 |
+        From: /*u8x1, u16x1, u32x1, u64x1 */ | // FIXME: unary small vector types
+        /// A 128-bit vector with 1 `u128` lane.
+);
+impl_m!([m128; 1]: m128x1 | i128, u8 | test_v128 | x0 |
+        From: /*m8x1, m16x1, m32x1, m64x1 */ | // FIXME: unary small vector types
+        /// A 128-bit vector mask with 1 `m128` lane.
+);
+
+
\ No newline at end of file diff --git a/src/packed_simd/v16.rs.html b/src/packed_simd/v16.rs.html new file mode 100644 index 000000000..f681d9920 --- /dev/null +++ b/src/packed_simd/v16.rs.html @@ -0,0 +1,35 @@ +v16.rs.html -- source
+//! 16-bit wide vector types
+
+use crate::*;
+
+impl_i!([i8; 2]: i8x2, m8x2 | i8, u8 | test_v16 | x0, x1 |
+        From: |
+        /// A 16-bit vector with 2 `i8` lanes.
+);
+impl_u!([u8; 2]: u8x2, m8x2 | u8, u8 | test_v16 | x0, x1 |
+        From: |
+        /// A 16-bit vector with 2 `u8` lanes.
+);
+impl_m!([m8; 2]: m8x2 | i8, u8 | test_v16 | x0, x1 |
+        From: m16x2, m32x2, m64x2, m128x2 |
+        /// A 16-bit vector mask with 2 `m8` lanes.
+);
+
+
\ No newline at end of file diff --git a/src/packed_simd/v256.rs.html b/src/packed_simd/v256.rs.html new file mode 100644 index 000000000..23dc81f67 --- /dev/null +++ b/src/packed_simd/v256.rs.html @@ -0,0 +1,175 @@ +v256.rs.html -- source
+//! 256-bit wide vector types
+#![rustfmt::skip]
+
+use crate::*;
+
+impl_i!([i8; 32]: i8x32, m8x32 | i8, u32 | test_v256 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15,
+        x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31 |
+        From: |
+        /// A 256-bit vector with 32 `i8` lanes.
+);
+impl_u!([u8; 32]: u8x32, m8x32 | u8, u32 | test_v256 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15,
+        x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31 |
+        From: |
+        /// A 256-bit vector with 32 `u8` lanes.
+);
+impl_m!([m8; 32]: m8x32 | i8, u32 | test_v256 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15,
+        x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31 |
+        From:  |
+        /// A 256-bit vector mask with 32 `m8` lanes.
+);
+
+impl_i!([i16; 16]: i16x16, m16x16 | i16, u16 | test_v256 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 |
+        From: i8x16, u8x16 |
+        /// A 256-bit vector with 16 `i16` lanes.
+);
+impl_u!([u16; 16]: u16x16, m16x16 | u16, u16 | test_v256 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 |
+        From: u8x16 |
+        /// A 256-bit vector with 16 `u16` lanes.
+);
+impl_m!([m16; 16]: m16x16 | i16, u16 | test_v256 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 |
+        From: m8x16 |
+        /// A 256-bit vector mask with 16 `m16` lanes.
+);
+
+impl_i!([i32; 8]: i32x8, m32x8 | i32, u8 | test_v256 | x0, x1, x2, x3, x4, x5, x6, x7  |
+        From: i8x8, u8x8, i16x8, u16x8 |
+        /// A 256-bit vector with 8 `i32` lanes.
+);
+impl_u!([u32; 8]: u32x8, m32x8 | u32, u8 | test_v256 | x0, x1, x2, x3, x4, x5, x6, x7 |
+        From: u8x8, u16x8 |
+        /// A 256-bit vector with 8 `u32` lanes.
+);
+impl_f!([f32; 8]: f32x8, m32x8 | f32 | test_v256 | x0, x1, x2, x3, x4, x5, x6, x7 |
+        From: i8x8, u8x8, i16x8, u16x8 |
+        /// A 256-bit vector with 8 `f32` lanes.
+);
+impl_m!([m32; 8]: m32x8 | i32, u8 | test_v256 | x0, x1, x2, x3, x4, x5, x6, x7 |
+        From: m8x8, m16x8 |
+        /// A 256-bit vector mask with 8 `m32` lanes.
+);
+
+impl_i!([i64; 4]: i64x4, m64x4 | i64, u8 | test_v256 | x0, x1, x2, x3 |
+        From: i8x4, u8x4, i16x4, u16x4, i32x4, u32x4 |
+        /// A 256-bit vector with 4 `i64` lanes.
+);
+impl_u!([u64; 4]: u64x4, m64x4 | u64, u8 | test_v256 | x0, x1, x2, x3 |
+        From: u8x4, u16x4, u32x4 |
+        /// A 256-bit vector with 4 `u64` lanes.
+);
+impl_f!([f64; 4]: f64x4, m64x4 | f64 | test_v256 | x0, x1, x2, x3 |
+        From: i8x4, u8x4, i16x4, u16x4, i32x4, u32x4, f32x4 |
+        /// A 256-bit vector with 4 `f64` lanes.
+);
+impl_m!([m64; 4]: m64x4 | i64, u8 | test_v256 | x0, x1, x2, x3 |
+        From: m8x4, m16x4, m32x4 |
+        /// A 256-bit vector mask with 4 `m64` lanes.
+);
+
+impl_i!([i128; 2]: i128x2, m128x2 | i128, u8 | test_v256 | x0, x1 |
+        From: i8x2, u8x2, i16x2, u16x2, i32x2, u32x2, i64x2, u64x2 |
+        /// A 256-bit vector with 2 `i128` lanes.
+);
+impl_u!([u128; 2]: u128x2, m128x2 | u128, u8 | test_v256 | x0, x1 |
+        From: u8x2, u16x2, u32x2, u64x2 |
+        /// A 256-bit vector with 2 `u128` lanes.
+);
+impl_m!([m128; 2]: m128x2 | i128, u8 | test_v256 | x0, x1 |
+        From: m8x2, m16x2, m32x2, m64x2 |
+        /// A 256-bit vector mask with 2 `m128` lanes.
+);
+
+
\ No newline at end of file diff --git a/src/packed_simd/v32.rs.html b/src/packed_simd/v32.rs.html new file mode 100644 index 000000000..21025bc31 --- /dev/null +++ b/src/packed_simd/v32.rs.html @@ -0,0 +1,61 @@ +v32.rs.html -- source
+//! 32-bit wide vector types
+
+use crate::*;
+
+impl_i!([i8; 4]: i8x4, m8x4 | i8, u8 | test_v32 | x0, x1, x2, x3 |
+        From: |
+        /// A 32-bit vector with 4 `i8` lanes.
+);
+impl_u!([u8; 4]: u8x4, m8x4 | u8, u8 | test_v32 | x0, x1, x2, x3 |
+        From: |
+        /// A 32-bit vector with 4 `u8` lanes.
+);
+impl_m!([m8; 4]: m8x4 | i8, u8 | test_v32 | x0, x1, x2, x3 |
+        From: m16x4, m32x4, m64x4 |
+        /// A 32-bit vector mask with 4 `m8` lanes.
+);
+
+impl_i!([i16; 2]: i16x2, m16x2 | i16, u8 | test_v32 | x0, x1 |
+        From: i8x2, u8x2 |
+        /// A 32-bit vector with 2 `i16` lanes.
+);
+impl_u!([u16; 2]: u16x2, m16x2 | u16, u8 | test_v32 | x0, x1 |
+        From: u8x2 |
+        /// A 32-bit vector with 2 `u16` lanes.
+);
+impl_m!([m16; 2]: m16x2 | i16, u8 | test_v32 | x0, x1 |
+        From: m8x2, m32x2, m64x2, m128x2 |
+        /// A 32-bit vector mask with 2 `m16` lanes.
+);
+
+
\ No newline at end of file diff --git a/src/packed_simd/v512.rs.html b/src/packed_simd/v512.rs.html new file mode 100644 index 000000000..3df374904 --- /dev/null +++ b/src/packed_simd/v512.rs.html @@ -0,0 +1,201 @@ +v512.rs.html -- source
+//! 512-bit wide vector types
+#![rustfmt::skip]
+
+use crate::*;
+
+impl_i!([i8; 64]: i8x64, m8x64 | i8, u64 | test_v512 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15,
+        x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31,
+        x32, x33, x34, x35, x36, x37, x38, x39, x40, x41, x42, x43, x44, x45, x46, x47,
+        x48, x49, x50, x51, x52, x53, x54, x55, x56, x57, x58, x59, x60, x61, x62, x63 |
+        From: |
+        /// A 512-bit vector with 64 `i8` lanes.
+);
+impl_u!([u8; 64]: u8x64, m8x64 | u8, u64 | test_v512 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15,
+        x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31,
+        x32, x33, x34, x35, x36, x37, x38, x39, x40, x41, x42, x43, x44, x45, x46, x47,
+        x48, x49, x50, x51, x52, x53, x54, x55, x56, x57, x58, x59, x60, x61, x62, x63 |
+        From: |
+        /// A 512-bit vector with 64 `u8` lanes.
+);
+impl_m!([m8; 64]: m8x64 | i8, u64 | test_v512 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15,
+        x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31,
+        x32, x33, x34, x35, x36, x37, x38, x39, x40, x41, x42, x43, x44, x45, x46, x47,
+        x48, x49, x50, x51, x52, x53, x54, x55, x56, x57, x58, x59, x60, x61, x62, x63 |
+        From:  |
+        /// A 512-bit vector mask with 64 `m8` lanes.
+);
+
+impl_i!([i16; 32]: i16x32, m16x32 | i16, u32 | test_v512 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15,
+        x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31 |
+        From: i8x32, u8x32 |
+        /// A 512-bit vector with 32 `i16` lanes.
+);
+impl_u!([u16; 32]: u16x32, m16x32 | u16, u32 | test_v512 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15,
+        x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31 |
+        From: u8x32 |
+        /// A 512-bit vector with 32 `u16` lanes.
+);
+impl_m!([m16; 32]: m16x32 | i16, u32 | test_v512 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15,
+        x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31 |
+        From: m8x32 |
+        /// A 512-bit vector mask with 32 `m16` lanes.
+);
+
+impl_i!([i32; 16]: i32x16, m32x16 | i32, u16 | test_v512 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 |
+        From: i8x16, u8x16, i16x16, u16x16 |
+        /// A 512-bit vector with 16 `i32` lanes.
+);
+impl_u!([u32; 16]: u32x16, m32x16 | u32, u16 | test_v512 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 |
+        From: u8x16, u16x16 |
+        /// A 512-bit vector with 16 `u32` lanes.
+);
+impl_f!([f32; 16]: f32x16, m32x16 | f32 | test_v512 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 |
+        From: i8x16, u8x16, i16x16, u16x16 |
+        /// A 512-bit vector with 16 `f32` lanes.
+);
+impl_m!([m32; 16]: m32x16 | i32, u16 | test_v512 |
+        x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15 |
+        From: m8x16, m16x16 |
+        /// A 512-bit vector mask with 16 `m32` lanes.
+);
+
+impl_i!([i64; 8]: i64x8, m64x8 | i64, u8 | test_v512 | x0, x1, x2, x3, x4, x5, x6, x7 |
+        From: i8x8, u8x8, i16x8, u16x8, i32x8, u32x8 |
+        /// A 512-bit vector with 8 `i64` lanes.
+);
+impl_u!([u64; 8]: u64x8, m64x8 | u64, u8 | test_v512 | x0, x1, x2, x3, x4, x5, x6, x7 |
+        From: u8x8, u16x8, u32x8 |
+        /// A 512-bit vector with 8 `u64` lanes.
+);
+impl_f!([f64; 8]: f64x8, m64x8 | f64 | test_v512 | x0, x1, x2, x3, x4, x5, x6, x7 |
+        From: i8x8, u8x8, i16x8, u16x8, i32x8, u32x8, f32x8 |
+        /// A 512-bit vector with 8 `f64` lanes.
+);
+impl_m!([m64; 8]: m64x8 | i64, u8 | test_v512 | x0, x1, x2, x3, x4, x5, x6, x7 |
+        From: m8x8, m16x8, m32x8 |
+        /// A 512-bit vector mask with 8 `m64` lanes.
+);
+
+impl_i!([i128; 4]: i128x4, m128x4 | i128, u8 | test_v512 | x0, x1, x2, x3 |
+        From: i8x4, u8x4, i16x4, u16x4, i32x4, u32x4, i64x4, u64x4 |
+        /// A 512-bit vector with 4 `i128` lanes.
+);
+impl_u!([u128; 4]: u128x4, m128x4 | u128, u8 | test_v512 | x0, x1, x2, x3 |
+        From: u8x4, u16x4, u32x4, u64x4 |
+        /// A 512-bit vector with 4 `u128` lanes.
+);
+impl_m!([m128; 4]: m128x4 | i128, u8 | test_v512 | x0, x1, x2, x3 |
+        From: m8x4, m16x4, m32x4, m64x4 |
+        /// A 512-bit vector mask with 4 `m128` lanes.
+);
+
+
\ No newline at end of file diff --git a/src/packed_simd/v64.rs.html b/src/packed_simd/v64.rs.html new file mode 100644 index 000000000..8832f96d0 --- /dev/null +++ b/src/packed_simd/v64.rs.html @@ -0,0 +1,135 @@ +v64.rs.html -- source
+//! 64-bit wide vector types
+#![rustfmt::skip]
+
+use super::*;
+
+impl_i!([i8; 8]: i8x8, m8x8 | i8, u8 | test_v64 | x0, x1, x2, x3, x4, x5, x6, x7 |
+        From: |
+        /// A 64-bit vector with 8 `i8` lanes.
+);
+impl_u!([u8; 8]: u8x8, m8x8 | u8, u8 | test_v64 | x0, x1, x2, x3, x4, x5, x6, x7 |
+        From: |
+        /// A 64-bit vector with 8 `u8` lanes.
+);
+impl_m!([m8; 8]: m8x8 | i8, u8 | test_v64 | x0, x1, x2, x3, x4, x5, x6, x7 |
+        From: m16x8, m32x8 |
+        /// A 64-bit vector mask with 8 `m8` lanes.
+);
+
+impl_i!([i16; 4]: i16x4, m16x4 | i16, u8 | test_v64 | x0, x1, x2, x3 |
+        From: i8x4, u8x4 |
+        /// A 64-bit vector with 4 `i16` lanes.
+);
+impl_u!([u16; 4]: u16x4, m16x4 | u16, u8 | test_v64 | x0, x1, x2, x3 |
+        From: u8x4 |
+        /// A 64-bit vector with 4 `u16` lanes.
+);
+impl_m!([m16; 4]: m16x4 | i16, u8 | test_v64 | x0, x1, x2, x3 |
+        From: m8x4, m32x4, m64x4 |
+        /// A 64-bit vector mask with 4 `m16` lanes.
+);
+
+impl_i!([i32; 2]: i32x2, m32x2 | i32, u8 | test_v64 | x0, x1 |
+        From: i8x2, u8x2, i16x2, u16x2 |
+        /// A 64-bit vector with 2 `i32` lanes.
+);
+impl_u!([u32; 2]: u32x2, m32x2 | u32, u8 | test_v64 | x0, x1 |
+        From: u8x2, u16x2 |
+        /// A 64-bit vector with 2 `u32` lanes.
+);
+impl_m!([m32; 2]: m32x2 | i32, u8 | test_v64 | x0, x1 |
+        From: m8x2, m16x2, m64x2, m128x2 |
+        /// A 64-bit vector mask with 2 `m32` lanes.
+);
+impl_f!([f32; 2]: f32x2, m32x2 | f32 | test_v64 | x0, x1 |
+        From: i8x2, u8x2, i16x2, u16x2 |
+        /// A 64-bit vector with 2 `f32` lanes.
+);
+
+/*
+impl_i!([i64; 1]: i64x1, m64x1 | i64, u8 | test_v64 | x0 |
+        From: /*i8x1, u8x1, i16x1, u16x1, i32x1, u32x1*/ |  // FIXME: primitive to vector conversion
+        /// A 64-bit vector with 1 `i64` lane.
+);
+impl_u!([u64; 1]: u64x1, m64x1 | u64, u8 | test_v64 | x0 |
+        From: /*u8x1, u16x1, u32x1*/ | // FIXME: primitive to vector conversion
+        /// A 64-bit vector with 1 `u64` lane.
+);
+impl_m!([m64; 1]: m64x1 | i64, u8 | test_v64 | x0 |
+        From: /*m8x1, m16x1, m32x1, */ m128x1 | // FIXME: unary small vector types
+        /// A 64-bit vector mask with 1 `m64` lane.
+);
+impl_f!([f64; 1]: f64x1, m64x1 | f64 | test_v64 | x0 |
+        From: /*i8x1, u8x1, i16x1, u16x1, i32x1, u32x1, f32x1*/ | // FIXME: unary small vector types
+        /// A 64-bit vector with 1 `f64` lane.
+);
+*/
+
+
\ No newline at end of file diff --git a/src/packed_simd/vPtr.rs.html b/src/packed_simd/vPtr.rs.html new file mode 100644 index 000000000..f2a82e228 --- /dev/null +++ b/src/packed_simd/vPtr.rs.html @@ -0,0 +1,71 @@ +vPtr.rs.html -- source
+//! Vectors of pointers
+#![rustfmt::skip]
+
+use crate::*;
+
+impl_const_p!(
+    [*const T; 2]: cptrx2, msizex2, usizex2, isizex2 | test_v128 | x0, x1 | From: |
+    /// A vector with 2 `*const T` lanes
+);
+
+impl_mut_p!(
+    [*mut T; 2]: mptrx2, msizex2, usizex2, isizex2 | test_v128 | x0, x1 | From: |
+    /// A vector with 2 `*mut T` lanes
+);
+
+impl_const_p!(
+    [*const T; 4]: cptrx4, msizex4, usizex4, isizex4 | test_v256 | x0, x1, x2, x3 | From: |
+    /// A vector with 4 `*const T` lanes
+);
+
+impl_mut_p!(
+    [*mut T; 4]: mptrx4, msizex4, usizex4, isizex4 | test_v256 | x0, x1, x2, x3 | From: |
+    /// A vector with 4 `*mut T` lanes
+);
+
+impl_const_p!(
+    [*const T; 8]: cptrx8, msizex8, usizex8, isizex8 | test_v512 | x0, x1, x2, x3, x4, x5, x6, x7 | From: |
+    /// A vector with 8 `*const T` lanes
+);
+
+impl_mut_p!(
+    [*mut T; 8]: mptrx8, msizex8, usizex8, isizex8 | test_v512 | x0, x1, x2, x3, x4, x5, x6, x7 | From: |
+    /// A vector with 8 `*mut T` lanes
+);
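+
+// Each invocation above pairs a pointer vector with the pointer-sized mask
+// (`msizexN`) and with the `usizexN`/`isizexN` integer vectors of the same
+// lane count.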
+
+
\ No newline at end of file diff --git a/src/packed_simd/vSize.rs.html b/src/packed_simd/vSize.rs.html new file mode 100644 index 000000000..f1fe797f8 --- /dev/null +++ b/src/packed_simd/vSize.rs.html @@ -0,0 +1,109 @@ +vSize.rs.html -- source
+//! Vectors with pointer-sized elements
+
+use crate::codegen::pointer_sized_int::{isize_, usize_};
+use crate::*;
+
+impl_i!([isize; 2]: isizex2, msizex2 | isize_, u8 | test_v128 |
+        x0, x1|
+        From: |
+        /// A vector with 2 `isize` lanes.
+);
+
+impl_u!([usize; 2]: usizex2, msizex2 | usize_, u8 | test_v128 |
+        x0, x1|
+        From: |
+        /// A vector with 2 `usize` lanes.
+);
+impl_m!([msize; 2]: msizex2 | isize_, u8 | test_v128 |
+        x0, x1 |
+        From: |
+        /// A vector mask with 2 `msize` lanes.
+);
+
+impl_i!([isize; 4]: isizex4, msizex4 | isize_, u8 | test_v256 |
+        x0, x1, x2, x3 |
+        From: |
+        /// A vector with 4 `isize` lanes.
+);
+impl_u!([usize; 4]: usizex4, msizex4 | usize_, u8 | test_v256 |
+        x0, x1, x2, x3|
+        From: |
+        /// A vector with 4 `usize` lanes.
+);
+impl_m!([msize; 4]: msizex4 | isize_, u8 | test_v256 |
+        x0, x1, x2, x3 |
+        From: |
+        /// A vector mask with 4 `msize` lanes.
+);
+
+impl_i!([isize; 8]: isizex8, msizex8 | isize_, u8 | test_v512 |
+        x0, x1, x2, x3, x4, x5, x6, x7 |
+        From: |
+        /// A vector with 8 `isize` lanes.
+);
+impl_u!([usize; 8]: usizex8, msizex8 | usize_, u8 | test_v512 |
+        x0, x1, x2, x3, x4, x5, x6, x7 |
+        From: |
+        /// A vector with 8 `usize` lanes.
+);
+impl_m!([msize; 8]: msizex8 | isize_, u8 | test_v512 |
+        x0, x1, x2, x3, x4, x5, x6, x7 |
+        From: |
+        /// A vector mask with 8 `msize` lanes.
+);
+
+
\ No newline at end of file diff --git a/storage.js new file mode 100644 index 000000000..1b5225126 --- /dev/null +++ b/storage.js @@ -0,0 +1 @@ +var resourcesSuffix="";var currentTheme=document.getElementById("themeStyle");var mainTheme=document.getElementById("mainThemeStyle");var savedHref=[];function hasClass(elem,className){return elem&&elem.classList&&elem.classList.contains(className)}function addClass(elem,className){if(!elem||!elem.classList){return}elem.classList.add(className)}function removeClass(elem,className){if(!elem||!elem.classList){return}elem.classList.remove(className)}function onEach(arr,func,reversed){if(arr&&arr.length>0&&func){var length=arr.length;var i;if(reversed!==true){for(i=0;i<length;++i){if(func(arr[i])===true){return true}}}else{for(i=length-1;i>=0;--i){if(func(arr[i])===true){return true}}}}return false}function onEachLazy(lazyArray,func,reversed){return onEach(Array.prototype.slice.call(lazyArray),func,reversed)}function hasOwnProperty(obj,property){return Object.prototype.hasOwnProperty.call(obj,property)}function usableLocalStorage(){if(typeof Storage==="undefined"){return false}try{return window.localStorage!==null&&window.localStorage!==undefined}catch(err){return false}}function updateLocalStorage(name,value){if(usableLocalStorage()){localStorage[name]=value}else{}}function getCurrentValue(name){if(usableLocalStorage()&&localStorage[name]!==undefined){return localStorage[name]}return null}function switchTheme(styleElem,mainStyleElem,newTheme,saveTheme){var fullBasicCss="rustdoc"+resourcesSuffix+".css";var fullNewTheme=newTheme+resourcesSuffix+".css";var newHref=mainStyleElem.href.replace(fullBasicCss,fullNewTheme);if(styleElem.href===newHref){return}var found=false;if(savedHref.length===0){onEachLazy(document.getElementsByTagName("link"),function(el){savedHref.push(el.href)})}onEach(savedHref,function(el){if(el===newHref){found=true;return true}});if(found===true){styleElem.href=newHref;if(saveTheme===true){updateLocalStorage("rustdoc-theme",newTheme)}}}function getSystemValue(){var property=getComputedStyle(document.documentElement).getPropertyValue('content');return property.replace(/[\"\']/g,"")}switchTheme(currentTheme,mainTheme,getCurrentValue("rustdoc-theme")||getSystemValue()||"light",false) \ No newline at end of file diff --git a/theme.js new file mode 100644 index 000000000..ebd1a8727 --- /dev/null +++ b/theme.js @@ -0,0 +1 @@ +var themes=document.getElementById("theme-choices");var themePicker=document.getElementById("theme-picker");function showThemeButtonState(){themes.style.display="block";themePicker.style.borderBottomRightRadius="0";themePicker.style.borderBottomLeftRadius="0"}function hideThemeButtonState(){themes.style.display="none";themePicker.style.borderBottomRightRadius="3px";themePicker.style.borderBottomLeftRadius="3px"}function switchThemeButtonState(){if(themes.style.display==="block"){hideThemeButtonState()}else{showThemeButtonState()}};function handleThemeButtonsBlur(e){var active=document.activeElement;var related=e.relatedTarget;if(active.id!=="themePicker"&&(!active.parentNode||active.parentNode.id!=="theme-choices")&&(!related||(related.id!=="themePicker"&&(!related.parentNode||related.parentNode.id!=="theme-choices")))){hideThemeButtonState()}}themePicker.onclick=switchThemeButtonState;themePicker.onblur=handleThemeButtonsBlur;["dark","light"].forEach(function(item){var but=document.createElement('button');but.textContent=item;but.onclick=function(el){switchTheme(currentTheme,mainTheme,item,true)};but.onblur=handleThemeButtonsBlur;themes.appendChild(but)}) \ No 
newline at end of file diff --git a/wheel.svg b/wheel.svg new file mode 100644 index 000000000..01da3b24c --- /dev/null +++ b/wheel.svg @@ -0,0 +1 @@ + \ No newline at end of file