diff --git a/xml/System.Runtime.Intrinsics.Arm/AdvSimd+Arm64.xml b/xml/System.Runtime.Intrinsics.Arm/AdvSimd+Arm64.xml
index 245abb80c9f..f4ff97b5f3a 100644
--- a/xml/System.Runtime.Intrinsics.Arm/AdvSimd+Arm64.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/AdvSimd+Arm64.xml
@@ -18,7 +18,7 @@
- To be added.
+ Provides access to the ARM AdvSIMD hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -8583,7 +8583,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.16B, Vn+1.16B }, [Xn]
+
To be added.
To be added.
@@ -8613,7 +8615,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2D, Vn+1.2D }, [Xn]
+
To be added.
To be added.
@@ -8643,7 +8647,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.8H, Vn+1.8H }, [Xn]
+
To be added.
To be added.
@@ -8673,7 +8679,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.4S, Vn+1.4S }, [Xn]
+
To be added.
To be added.
@@ -8703,7 +8711,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2D, Vn+1.2D }, [Xn]
+
To be added.
To be added.
@@ -8733,7 +8743,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.16B, Vn+1.16B }, [Xn]
+
To be added.
To be added.
@@ -8763,7 +8775,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.4S, Vn+1.4S }, [Xn]
+
To be added.
To be added.
@@ -8793,7 +8807,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.8H, Vn+1.8H }, [Xn]
+
To be added.
To be added.
@@ -8823,7 +8839,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.4S, Vn+1.4S }, [Xn]
+
To be added.
To be added.
@@ -8853,7 +8871,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2D, Vn+1.2D }, [Xn]
+
To be added.
To be added.
@@ -8883,7 +8903,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.16B, Vn+1.16B }, [Xn]
+
To be added.
To be added.
@@ -8913,7 +8935,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.2D, Vn+1.2D }, [Xn]
+
To be added.
To be added.
@@ -8943,7 +8967,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.8H, Vn+1.8H }, [Xn]
+
To be added.
To be added.
@@ -8973,7 +8999,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.4S, Vn+1.4S }, [Xn]
+
To be added.
To be added.
@@ -9003,7 +9031,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.2D, Vn+1.2D }, [Xn]
+
To be added.
To be added.
@@ -9033,7 +9063,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.16B, Vn+1.16B }, [Xn]
+
To be added.
To be added.
@@ -9063,7 +9095,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.4S, Vn+1.4S }, [Xn]
+
To be added.
To be added.
@@ -9093,7 +9127,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.8H, Vn+1.8H }, [Xn]
+
To be added.
To be added.
@@ -9123,7 +9159,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.4S, Vn+1.4S }, [Xn]
+
To be added.
To be added.
@@ -9153,7 +9191,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.2D, Vn+1.2D }, [Xn]
+
To be added.
To be added.
@@ -9183,7 +9223,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.16B, Vn+1.16B, Vn+2.16B }, [Xn]
+
To be added.
To be added.
@@ -9213,7 +9255,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2D, Vn+1.2D, Vn+2.2D }, [Xn]
+
To be added.
To be added.
@@ -9243,7 +9287,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.8H, Vn+1.8H, Vn+2.8H }, [Xn]
+
To be added.
To be added.
@@ -9273,7 +9319,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.4S, Vn+1.4S, Vn+2.4S }, [Xn]
+
To be added.
To be added.
@@ -9303,7 +9351,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2D, Vn+1.2D, Vn+2.2D }, [Xn]
+
To be added.
To be added.
@@ -9333,7 +9383,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.16B, Vn+1.16B, Vn+2.16B }, [Xn]
+
To be added.
To be added.
@@ -9363,7 +9415,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.4S, Vn+1.4S, Vn+2.4S }, [Xn]
+
To be added.
To be added.
@@ -9393,7 +9447,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.8H, Vn+1.8H, Vn+2.8H }, [Xn]
+
To be added.
To be added.
@@ -9423,7 +9479,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.4S, Vn+1.4S, Vn+2.4S }, [Xn]
+
To be added.
To be added.
@@ -9453,7 +9511,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2D, Vn+1.2D, Vn+2.2D }, [Xn]
+
To be added.
To be added.
@@ -9483,7 +9543,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.16B, Vn+1.16B, Vn+2.16B }, [Xn]
+
To be added.
To be added.
@@ -9513,7 +9575,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.2D, Vn+1.2D, Vn+2.2D }, [Xn]
+
To be added.
To be added.
@@ -9543,7 +9607,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.8H, Vn+1.8H, Vn+2.8H }, [Xn]
+
To be added.
To be added.
@@ -9573,7 +9639,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.4S, Vn+1.4S, Vn+2.4S }, [Xn]
+
To be added.
To be added.
@@ -9603,7 +9671,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.2D, Vn+1.2D, Vn+2.2D }, [Xn]
+
To be added.
To be added.
@@ -9633,7 +9703,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.16B, Vn+1.16B, Vn+2.16B }, [Xn]
+
To be added.
To be added.
@@ -9663,7 +9735,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.4S, Vn+1.4S, Vn+2.4S }, [Xn]
+
To be added.
To be added.
@@ -9693,7 +9767,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.8H, Vn+1.8H, Vn+2.8H }, [Xn]
+
To be added.
To be added.
@@ -9723,7 +9799,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.4S, Vn+1.4S, Vn+2.4S }, [Xn]
+
To be added.
To be added.
@@ -9753,7 +9831,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.2D, Vn+1.2D, Vn+2.2D }, [Xn]
+
To be added.
To be added.
@@ -9783,7 +9863,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B }, [Xn]
+
To be added.
To be added.
@@ -9813,7 +9895,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }, [Xn]
+
To be added.
To be added.
@@ -9843,7 +9927,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.8H, Vn+1.8H, Vn+2.8H, Vn+3.8H }, [Xn]
+
To be added.
To be added.
@@ -9873,7 +9959,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }, [Xn]
+
To be added.
To be added.
@@ -9903,7 +9991,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }, [Xn]
+
To be added.
To be added.
@@ -9933,7 +10023,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B }, [Xn]
+
To be added.
To be added.
@@ -9963,7 +10055,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }, [Xn]
+
To be added.
To be added.
@@ -9993,7 +10087,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.8H, Vn+1.8H, Vn+2.8H, Vn+3.8H }, [Xn]
+
To be added.
To be added.
@@ -10023,7 +10119,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }, [Xn]
+
To be added.
To be added.
@@ -10053,7 +10151,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }, [Xn]
+
To be added.
To be added.
@@ -10083,7 +10183,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B }, [Xn]
+
To be added.
To be added.
@@ -10113,7 +10215,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }, [Xn]
+
To be added.
To be added.
@@ -10143,7 +10247,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.8H, Vn+1.8H, Vn+2.8H, Vn+3.8H }, [Xn]
+
To be added.
To be added.
@@ -10173,7 +10279,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }, [Xn]
+
To be added.
To be added.
@@ -10203,7 +10311,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }, [Xn]
+
To be added.
To be added.
@@ -10233,7 +10343,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B }, [Xn]
+
To be added.
To be added.
@@ -10263,7 +10375,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }, [Xn]
+
To be added.
To be added.
@@ -10293,7 +10407,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.8H, Vn+1.8H, Vn+2.8H, Vn+3.8H }, [Xn]
+
To be added.
To be added.
@@ -10323,7 +10439,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }, [Xn]
+
To be added.
To be added.
@@ -10353,7 +10471,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }, [Xn]
+
To be added.
To be added.
@@ -10394,7 +10514,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B }[Vm], [Xn]
+
To be added.
To be added.
@@ -10435,7 +10557,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.16B, Vn+1.16B, Vn+2.16B }[Vm], [Xn]
+
To be added.
To be added.
@@ -10476,7 +10600,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.16B, Vn+1.16B }[Vm], [Xn]
+
To be added.
To be added.
@@ -10517,7 +10643,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }[Vm], [Xn]
+
To be added.
To be added.
@@ -10558,7 +10686,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.2D, Vn+1.2D, Vn+2.2D }[Vm], [Xn]
+
To be added.
To be added.
@@ -10599,7 +10729,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.2D, Vn+1.2D }[Vm], [Xn]
+
To be added.
To be added.
@@ -10640,7 +10772,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.8H, Vn+1.8H, Vn+2.8H, Vn+3.8H }[Vm], [Xn]
+
To be added.
To be added.
@@ -10681,7 +10815,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.8H, Vn+1.8H, Vn+2.8H }[Vm], [Xn]
+
To be added.
To be added.
@@ -10722,7 +10858,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.8H, Vn+1.8H }[Vm], [Xn]
+
To be added.
To be added.
@@ -10763,7 +10901,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }[Vm], [Xn]
+
To be added.
To be added.
@@ -10804,7 +10944,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.4S, Vn+1.4S, Vn+2.4S }[Vm], [Xn]
+
To be added.
To be added.
@@ -10845,7 +10987,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.4S, Vn+1.4S }[Vm], [Xn]
+
To be added.
To be added.
@@ -10886,7 +11030,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }[Vm], [Xn]
+
To be added.
To be added.
@@ -10927,7 +11073,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.2D, Vn+1.2D, Vn+2.2D }[Vm], [Xn]
+
To be added.
To be added.
@@ -10968,7 +11116,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.2D, Vn+1.2D }[Vm], [Xn]
+
To be added.
To be added.
@@ -11009,7 +11159,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B }[Vm], [Xn]
+
To be added.
To be added.
@@ -11050,7 +11202,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.16B, Vn+1.16B, Vn+2.16B }[Vm], [Xn]
+
To be added.
To be added.
@@ -11091,7 +11245,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.16B, Vn+1.16B }[Vm], [Xn]
+
To be added.
To be added.
@@ -11132,7 +11288,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }[Vm], [Xn]
+
To be added.
To be added.
@@ -11173,7 +11331,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.4S, Vn+1.4S, Vn+2.4S }[Vm], [Xn]
+
To be added.
To be added.
@@ -11214,7 +11374,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.4S, Vn+1.4S }[Vm], [Xn]
+
To be added.
To be added.
@@ -11255,7 +11417,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.8H, Vn+1.8H, Vn+2.8H, Vn+3.8H }[Vm], [Xn]
+
To be added.
To be added.
@@ -11296,7 +11460,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.8H, Vn+1.8H, Vn+2.8H }[Vm], [Xn]
+
To be added.
To be added.
@@ -11337,7 +11503,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.8H, Vn+1.8H }[Vm], [Xn]
+
To be added.
To be added.
@@ -11378,7 +11546,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }[Vm], [Xn]
+
To be added.
To be added.
@@ -11419,7 +11589,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.4S, Vn+1.4S, Vn+2.4S }[Vm], [Xn]
+
To be added.
To be added.
@@ -11460,7 +11632,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.4S, Vn+1.4S }[Vm], [Xn]
+
To be added.
To be added.
@@ -11501,7 +11675,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }[Vm], [Xn]
+
To be added.
To be added.
@@ -11542,7 +11718,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.2D, Vn+1.2D, Vn+2.2D }[Vm], [Xn]
+
To be added.
To be added.
@@ -11583,7 +11761,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.2D, Vn+1.2D }[Vm], [Xn]
+
To be added.
To be added.
@@ -11706,7 +11886,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.16B, Vn+1.16B }, [Xn]
+
To be added.
To be added.
@@ -11736,7 +11918,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.2D, Vn+1.2D }, [Xn]
+
To be added.
To be added.
@@ -11766,7 +11950,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.8H, Vn+1.8H }, [Xn]
+
To be added.
To be added.
@@ -11796,7 +11982,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.4S, Vn+1.4S }, [Xn]
+
To be added.
To be added.
@@ -11826,7 +12014,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.2D, Vn+1.2D }, [Xn]
+
To be added.
To be added.
@@ -11856,7 +12046,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.16B, Vn+1.16B }, [Xn]
+
To be added.
To be added.
@@ -11886,7 +12078,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.4S, Vn+1.4S }, [Xn]
+
To be added.
To be added.
@@ -11916,7 +12110,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.8H, Vn+1.8H }, [Xn]
+
To be added.
To be added.
@@ -11946,7 +12142,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.4S, Vn+1.4S }, [Xn]
+
To be added.
To be added.
@@ -11976,7 +12174,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.2D, Vn+1.2D }, [Xn]
+
To be added.
To be added.
@@ -12006,7 +12206,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.16B, Vn+1.16B, Vn+2.16B }, [Xn]
+
To be added.
To be added.
@@ -12036,7 +12238,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.2D, Vn+1.2D, Vn+2.2D }, [Xn]
+
To be added.
To be added.
@@ -12066,7 +12270,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.8H, Vn+1.8H, Vn+2.8H }, [Xn]
+
To be added.
To be added.
@@ -12096,7 +12302,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.4S, Vn+1.4S, Vn+2.4S }, [Xn]
+
To be added.
To be added.
@@ -12126,7 +12334,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.2D, Vn+1.2D, Vn+2.2D }, [Xn]
+
To be added.
To be added.
@@ -12156,7 +12366,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.16B, Vn+1.16B, Vn+2.16B }, [Xn]
+
To be added.
To be added.
@@ -12186,7 +12398,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.4S, Vn+1.4S, Vn+2.4S }, [Xn]
+
To be added.
To be added.
@@ -12216,7 +12430,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.8H, Vn+1.8H, Vn+2.8H }, [Xn]
+
To be added.
To be added.
@@ -12246,7 +12462,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.4S, Vn+1.4S, Vn+2.4S }, [Xn]
+
To be added.
To be added.
@@ -12276,7 +12494,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.2D, Vn+1.2D, Vn+2.2D }, [Xn]
+
To be added.
To be added.
@@ -12306,7 +12526,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B }, [Xn]
+
To be added.
To be added.
@@ -12336,7 +12558,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }, [Xn]
+
To be added.
To be added.
@@ -12366,7 +12590,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.8H, Vn+1.8H, Vn+2.8H, Vn+3.8H }, [Xn]
+
To be added.
To be added.
@@ -12396,7 +12622,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }, [Xn]
+
To be added.
To be added.
@@ -12426,7 +12654,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }, [Xn]
+
To be added.
To be added.
@@ -12456,7 +12686,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B }, [Xn]
+
To be added.
To be added.
@@ -12486,7 +12718,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }, [Xn]
+
To be added.
To be added.
@@ -12516,7 +12750,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.8H, Vn+1.8H, Vn+2.8H, Vn+3.8H }, [Xn]
+
To be added.
To be added.
@@ -12546,7 +12782,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }, [Xn]
+
To be added.
To be added.
@@ -12576,7 +12814,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }, [Xn]
+
To be added.
To be added.
@@ -21326,7 +21566,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B }, [Xn]
+
To be added.
@@ -21358,7 +21600,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.16B, Vn+1.16B, Vn+2.16B }, [Xn]
+
To be added.
@@ -21390,7 +21634,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.16B, Vn+1.16B }, [Xn]
+
To be added.
@@ -21422,7 +21668,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }, [Xn]
+
To be added.
@@ -21454,7 +21702,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2D, Vn+1.2D, Vn+2.2D }, [Xn]
+
To be added.
@@ -21486,7 +21736,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2D, Vn+1.2D }, [Xn]
+
To be added.
@@ -21518,7 +21770,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.8H, Vn+1.8H, Vn+2.8H, Vn+3.8H }, [Xn]
+
To be added.
@@ -21550,7 +21804,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.8H, Vn+1.8H, Vn+2.8H }, [Xn]
+
To be added.
@@ -21582,7 +21838,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.8H, Vn+1.8H }, [Xn]
+
To be added.
@@ -21614,7 +21872,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }, [Xn]
+
To be added.
@@ -21646,7 +21906,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.4S, Vn+1.4S, Vn+2.4S }, [Xn]
+
To be added.
@@ -21678,7 +21940,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.4S, Vn+1.4S }, [Xn]
+
To be added.
@@ -21710,7 +21974,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }, [Xn]
+
To be added.
@@ -21742,7 +22008,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2D, Vn+1.2D, Vn+2.2D }, [Xn]
+
To be added.
@@ -21774,7 +22042,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2D, Vn+1.2D }, [Xn]
+
To be added.
@@ -21806,7 +22076,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B }, [Xn]
+
To be added.
@@ -21838,7 +22110,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.16B, Vn+1.16B, Vn+2.16B }, [Xn]
+
To be added.
@@ -21870,7 +22144,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.16B, Vn+1.16B }, [Xn]
+
To be added.
@@ -21902,7 +22178,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }, [Xn]
+
To be added.
@@ -21934,7 +22212,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.4S, Vn+1.4S, Vn+2.4S }, [Xn]
+
To be added.
@@ -21966,7 +22246,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.4S, Vn+1.4S }, [Xn]
+
To be added.
@@ -21998,7 +22280,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.8H, Vn+1.8H, Vn+2.8H, Vn+3.8H }, [Xn]
+
To be added.
@@ -22030,7 +22314,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.8H, Vn+1.8H, Vn+2.8H }, [Xn]
+
To be added.
@@ -22062,7 +22348,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.8H, Vn+1.8H }, [Xn]
+
To be added.
@@ -22094,7 +22382,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }, [Xn]
+
To be added.
@@ -22126,7 +22416,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.4S, Vn+1.4S, Vn+2.4S }, [Xn]
+
To be added.
@@ -22158,7 +22450,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.4S, Vn+1.4S }, [Xn]
+
To be added.
@@ -22190,7 +22484,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }, [Xn]
+
To be added.
@@ -22222,7 +22518,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2D, Vn+1.2D, Vn+2.2D }, [Xn]
+
To be added.
@@ -22254,7 +22552,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2D, Vn+1.2D }, [Xn]
+
To be added.
@@ -23813,7 +24113,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst4q_lane_s8 (int8_t * ptr, int8x16x4_t val, const int lane)
+ A64: ST4 { Vt.16B, Vt+1.16B, Vt+2.16B, Vt+3.16B }[index], [Xn]
+
To be added.
@@ -23854,7 +24157,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst3q_lane_s8 (int8_t * ptr, int8x16x3_t val, const int lane)
+ A64: ST3 { Vt.16B, Vt+1.16B, Vt+2.16B }[index], [Xn]
+
To be added.
@@ -23895,7 +24201,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst2q_lane_s8 (int8_t * ptr, int8x16x2_t val, const int lane)
+ A64: ST2 { Vt.16B, Vt+1.16B }[index], [Xn]
+
To be added.
@@ -23936,7 +24245,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vt.2D, Vt+1.2D, Vt+2.2D, Vt+3.2D }[index], [Xn]
+
To be added.
@@ -23977,7 +24288,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vt.2D, Vt+1.2D, Vt+2.2D }[index], [Xn]
+
To be added.
@@ -24018,7 +24331,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vt.2D, Vt+1.2D }[index], [Xn]
+
To be added.
@@ -24059,7 +24374,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst4q_lane_s16 (int16_t * ptr, int16x8x4_t val, const int lane)
+ A64: ST4 { Vt.8H, Vt+1.8H, Vt+2.8H, Vt+3.8H }[index], [Xn]
+
To be added.
@@ -24100,7 +24418,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst3q_lane_s16 (int16_t * ptr, int16x8x3_t val, const int lane)
+ A64: ST3 { Vt.8H, Vt+1.8H, Vt+2.8H }[index], [Xn]
+
To be added.
@@ -24141,7 +24462,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst2q_lane_s16 (int16_t * ptr, int16x8x2_t val, const int lane)
+ A64: ST2 { Vt.8H, Vt+1.8H }[index], [Xn]
+
To be added.
@@ -24182,7 +24506,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst4q_lane_s32 (int32_t * ptr, int32x4x4_t val, const int lane)
+ A64: ST4 { Vt.4S, Vt+1.4S, Vt+2.4S, Vt+3.4S }[index], [Xn]
+
To be added.
@@ -24223,7 +24550,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst3q_lane_s32 (int32_t * ptr, int32x4x3_t val, const int lane)
+ A64: ST3 { Vt.4S, Vt+1.4S, Vt+2.4S }[index], [Xn]
+
To be added.
@@ -24264,7 +24594,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst2q_lane_s32 (int32_t * ptr, int32x4x2_t val, const int lane)
+ A64: ST2 { Vt.4S, Vt+1.4S }[index], [Xn]
+
To be added.
@@ -24305,7 +24638,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vt.2D, Vt+1.2D, Vt+2.2D, Vt+3.2D }[index], [Xn]
+
To be added.
@@ -24346,7 +24681,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vt.2D, Vt+1.2D, Vt+2.2D }[index], [Xn]
+
To be added.
@@ -24387,7 +24724,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vt.2D, Vt+1.2D }[index], [Xn]
+
To be added.
@@ -24428,7 +24767,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst4q_lane_s8 (int8_t * ptr, int8x16x4_t val, const int lane)
+ A64: ST4 { Vt.16B, Vt+1.16B, Vt+2.16B, Vt+3.16B }[index], [Xn]
+
To be added.
@@ -24469,7 +24811,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst3q_lane_s8 (int8_t * ptr, int8x16x3_t val, const int lane)
+ A64: ST3 { Vt.16B, Vt+1.16B, Vt+2.16B }[index], [Xn]
+
To be added.
@@ -24510,7 +24855,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst2q_lane_s8 (int8_t * ptr, int8x16x2_t val, const int lane)
+ A64: ST2 { Vt.16B, Vt+1.16B }[index], [Xn]
+
To be added.
@@ -24551,7 +24899,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst4q_lane_f32 (float32_t * ptr, float32x4x4_t val, const int lane)
+ A64: ST4 { Vt.4S, Vt+1.4S, Vt+2.4S, Vt+3.4S }[index], [Xn]
+
To be added.
@@ -24592,7 +24943,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst3q_lane_f32 (float32_t * ptr, float32x4x3_t val, const int lane)
+ A64: ST3 { Vt.4S, Vt+1.4S, Vt+2.4S }[index], [Xn]
+
To be added.
@@ -24633,7 +24987,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst2q_lane_f32 (float32_t * ptr, float32x4x2_t val, const int lane)
+ A64: ST2 { Vt.4S, Vt+1.4S }[index], [Xn]
+
To be added.
@@ -24674,7 +25031,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst4q_lane_s16 (int16_t * ptr, int16x8x4_t val, const int lane)
+ A64: ST4 { Vt.8H, Vt+1.8H, Vt+2.8H, Vt+3.8H }[index], [Xn]
+
To be added.
@@ -24715,7 +25075,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst3q_lane_s16 (int16_t * ptr, int16x8x3_t val, const int lane)
+ A64: ST3 { Vt.8H, Vt+1.8H, Vt+2.8H }[index], [Xn]
+
To be added.
@@ -24756,7 +25119,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst2q_lane_s16 (int16_t * ptr, int16x8x2_t val, const int lane)
+ A64: ST2 { Vt.8H, Vt+1.8H }[index], [Xn]
+
To be added.
@@ -24797,7 +25163,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst4q_lane_s32 (int32_t * ptr, int32x4x4_t val, const int lane)
+ A64: ST4 { Vt.4S, Vt+1.4S, Vt+2.4S, Vt+3.4S }[index], [Xn]
+
To be added.
@@ -24838,7 +25207,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst3q_lane_s32 (int32_t * ptr, int32x4x3_t val, const int lane)
+ A64: ST3 { Vt.4S, Vt+1.4S, Vt+2.4S }[index], [Xn]
+
To be added.
@@ -24879,7 +25251,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void vst2q_lane_s32 (int32_t * ptr, int32x4x2_t val, const int lane)
+ A64: ST2 { Vt.4S, Vt+1.4S }[index], [Xn]
+
To be added.
@@ -24920,7 +25295,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vt.2D, Vt+1.2D, Vt+2.2D, Vt+3.2D }[index], [Xn]
+
To be added.
@@ -24961,7 +25338,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vt.2D, Vt+1.2D, Vt+2.2D }[index], [Xn]
+
To be added.
@@ -25002,7 +25381,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vt.2D, Vt+1.2D }[index], [Xn]
+
To be added.
@@ -25034,7 +25415,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B }, [Xn]
+
To be added.
@@ -25066,7 +25449,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.16B, Vn+1.16B, Vn+2.16B }, [Xn]
+
To be added.
@@ -25098,7 +25483,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.16B, Vn+1.16B }, [Xn]
+
To be added.
@@ -25130,7 +25517,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }, [Xn]
+
To be added.
@@ -25162,7 +25551,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.2D, Vn+1.2D, Vn+2.2D }, [Xn]
+
To be added.
@@ -25194,7 +25585,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.2D, Vn+1.2D }, [Xn]
+
To be added.
@@ -25226,7 +25619,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.8H, Vn+1.8H, Vn+2.8H, Vn+3.8H }, [Xn]
+
To be added.
@@ -25258,7 +25653,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.8H, Vn+1.8H, Vn+2.8H }, [Xn]
+
To be added.
@@ -25290,7 +25687,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.8H, Vn+1.8H }, [Xn]
+
To be added.
@@ -25322,7 +25721,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }, [Xn]
+
To be added.
@@ -25354,7 +25755,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.4S, Vn+1.4S, Vn+2.4S }, [Xn]
+
To be added.
@@ -25386,7 +25789,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.4S, Vn+1.4S }, [Xn]
+
To be added.
@@ -25418,7 +25823,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }, [Xn]
+
To be added.
@@ -25450,7 +25857,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.2D, Vn+1.2D, Vn+2.2D }, [Xn]
+
To be added.
@@ -25482,7 +25891,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.2D, Vn+1.2D }, [Xn]
+
To be added.
@@ -25514,7 +25925,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.16B, Vn+1.16B, Vn+2.16B, Vn+3.16B }, [Xn]
+
To be added.
@@ -25546,7 +25959,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.16B, Vn+1.16B, Vn+2.16B }, [Xn]
+
To be added.
@@ -25578,7 +25993,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.16B, Vn+1.16B }, [Xn]
+
To be added.
@@ -25610,7 +26027,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }, [Xn]
+
To be added.
@@ -25642,7 +26061,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.4S, Vn+1.4S, Vn+2.4S }, [Xn]
+
To be added.
@@ -25674,7 +26095,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.4S, Vn+1.4S }, [Xn]
+
To be added.
@@ -25706,7 +26129,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.8H, Vn+1.8H, Vn+2.8H, Vn+3.8H }, [Xn]
+
To be added.
@@ -25738,7 +26163,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.8H, Vn+1.8H, Vn+2.8H }, [Xn]
+
To be added.
@@ -25770,7 +26197,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.8H, Vn+1.8H }, [Xn]
+
To be added.
@@ -25802,7 +26231,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.4S, Vn+1.4S, Vn+2.4S, Vn+3.4S }, [Xn]
+
To be added.
@@ -25834,7 +26265,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.4S, Vn+1.4S, Vn+2.4S }, [Xn]
+
To be added.
@@ -25866,7 +26299,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.4S, Vn+1.4S }, [Xn]
+
To be added.
@@ -25898,7 +26333,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.2D, Vn+1.2D, Vn+2.2D, Vn+3.2D }, [Xn]
+
To be added.
@@ -25930,7 +26367,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.2D, Vn+1.2D, Vn+2.2D }, [Xn]
+
To be added.
@@ -25962,7 +26401,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.2D, Vn+1.2D }, [Xn]
+
To be added.
diff --git a/xml/System.Runtime.Intrinsics.Arm/AdvSimd.xml b/xml/System.Runtime.Intrinsics.Arm/AdvSimd.xml
index aee8334db64..4bc11d88a1b 100644
--- a/xml/System.Runtime.Intrinsics.Arm/AdvSimd.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/AdvSimd.xml
@@ -20609,7 +20609,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.8B, Vn+1.8B }, [Xn]
+
To be added.
To be added.
@@ -20639,7 +20641,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.4H, Vn+1.4H }, [Xn]
+
To be added.
To be added.
@@ -20669,7 +20673,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2S, Vn+1.2S }, [Xn]
+
To be added.
To be added.
@@ -20699,7 +20705,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.8B, Vn+1.8B }, [Xn]
+
To be added.
To be added.
@@ -20729,7 +20737,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2S, Vn+1.2S }, [Xn]
+
To be added.
To be added.
@@ -20759,7 +20769,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.4H, Vn+1.4H }, [Xn]
+
To be added.
To be added.
@@ -20789,7 +20801,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2S, Vn+1.2S }, [Xn]
+
To be added.
To be added.
@@ -20819,7 +20833,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.8B, Vn+1.8B }, [Xn]
+
To be added.
To be added.
@@ -20849,7 +20865,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.4H, Vn+1.4H }, [Xn]
+
To be added.
To be added.
@@ -20879,7 +20897,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.2S, Vn+1.2S }, [Xn]
+
To be added.
To be added.
@@ -20909,7 +20929,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.8B, Vn+1.8B }, [Xn]
+
To be added.
To be added.
@@ -20939,7 +20961,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.2S, Vn+1.2S }, [Xn]
+
To be added.
To be added.
@@ -20969,7 +20993,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.4H, Vn+1.4H }, [Xn]
+
To be added.
To be added.
@@ -20999,7 +21025,9 @@
To be added.
- To be added.
+
+ A64: LD2 { Vn.2S, Vn+1.2S }, [Xn]
+
To be added.
To be added.
@@ -21029,7 +21057,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.8B, Vn+1.8B, Vn+2.8B }, [Xn]
+
To be added.
To be added.
@@ -21059,7 +21089,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.4H, Vn+1.4H, Vn+2.4H }, [Xn]
+
To be added.
To be added.
@@ -21089,7 +21121,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2S, Vn+1.2S, Vn+2.2S }, [Xn]
+
To be added.
To be added.
@@ -21119,7 +21153,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.8B, Vn+1.8B, Vn+2.8B }, [Xn]
+
To be added.
To be added.
@@ -21149,7 +21185,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2S, Vn+1.2S, Vn+2.2S }, [Xn]
+
To be added.
To be added.
@@ -21179,7 +21217,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.4H, Vn+1.4H, Vn+2.4H }, [Xn]
+
To be added.
To be added.
@@ -21209,7 +21249,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2S, Vn+1.2S, Vn+2.2S }, [Xn]
+
To be added.
To be added.
@@ -21239,7 +21281,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.8B, Vn+1.8B, Vn+2.8B }, [Xn]
+
To be added.
To be added.
@@ -21269,7 +21313,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.4H, Vn+1.4H, Vn+2.4H }, [Xn]
+
To be added.
To be added.
@@ -21299,7 +21345,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.2S, Vn+1.2S, Vn+2.2S }, [Xn]
+
To be added.
To be added.
@@ -21329,7 +21377,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.8B, Vn+1.8B, Vn+2.8B }, [Xn]
+
To be added.
To be added.
@@ -21359,7 +21409,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.2S, Vn+1.2S, Vn+2.2S }, [Xn]
+
To be added.
To be added.
@@ -21389,7 +21441,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.4H, Vn+1.4H, Vn+2.4H }, [Xn]
+
To be added.
To be added.
@@ -21419,7 +21473,9 @@
To be added.
- To be added.
+
+ A64: LD3 { Vn.2S, Vn+1.2S, Vn+2.2S }, [Xn]
+
To be added.
To be added.
@@ -21449,7 +21505,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.8B, Vn+1.8B, Vn+2.8B, Vn+3.8B }, [Xn]
+
To be added.
To be added.
@@ -21479,7 +21537,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.4H, Vn+1.4H, Vn+2.4H, Vn+3.4H }, [Xn]
+
To be added.
To be added.
@@ -21509,7 +21569,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }, [Xn]
+
To be added.
To be added.
@@ -21539,7 +21601,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.8B, Vn+1.8B, Vn+2.8B, Vn+3.8B }, [Xn]
+
To be added.
To be added.
@@ -21569,7 +21633,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }, [Xn]
+
To be added.
To be added.
@@ -21599,7 +21665,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.4H, Vn+1.4H, Vn+2.4H, Vn+3.4H }, [Xn]
+
To be added.
To be added.
@@ -21629,7 +21697,9 @@
To be added.
- To be added.
+
+ A64: LD1 { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }, [Xn]
+
To be added.
To be added.
@@ -21659,7 +21729,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.8B, Vn+1.8B, Vn+2.8B, Vn+3.8B }, [Xn]
+
To be added.
To be added.
@@ -21689,7 +21761,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.4H, Vn+1.4H, Vn+2.4H, Vn+3.4H }, [Xn]
+
To be added.
To be added.
@@ -21719,7 +21793,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }, [Xn]
+
To be added.
To be added.
@@ -21749,7 +21825,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.8B, Vn+1.8B, Vn+2.8B, Vn+3.8B }, [Xn]
+
To be added.
To be added.
@@ -21779,7 +21857,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }, [Xn]
+
To be added.
To be added.
@@ -21809,7 +21889,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.4H, Vn+1.4H, Vn+2.4H, Vn+3.4H }, [Xn]
+
To be added.
To be added.
@@ -21839,7 +21921,9 @@
To be added.
- To be added.
+
+ A64: LD4 { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }, [Xn]
+
To be added.
To be added.
@@ -22611,7 +22695,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.8B, Vn+1.8B, Vn+2.8B, Vn+3.8B }[Vm], [Xn]
+
To be added.
To be added.
@@ -22652,7 +22738,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.8B, Vn+1.8B, Vn+2.8B }[Vm], [Xn]
+
To be added.
To be added.
@@ -22693,7 +22781,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.8B, Vn+1.8B }[Vm], [Xn]
+
To be added.
To be added.
@@ -22734,7 +22824,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.4H, Vn+1.4H, Vn+2.4H, Vn+3.4H }[Vm], [Xn]
+
To be added.
To be added.
@@ -22775,7 +22867,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.4H, Vn+1.4H, Vn+2.4H }[Vm], [Xn]
+
To be added.
To be added.
@@ -22816,7 +22910,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.4H, Vn+1.4H }[Vm], [Xn]
+
To be added.
To be added.
@@ -22857,7 +22953,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }[Vm], [Xn]
+
To be added.
To be added.
@@ -22898,7 +22996,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.2S, Vn+1.2S, Vn+2.2S }[Vm], [Xn]
+
To be added.
To be added.
@@ -22939,7 +23039,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.2S, Vn+1.2S }[Vm], [Xn]
+
To be added.
To be added.
@@ -22980,7 +23082,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.8B, Vn+1.8B, Vn+2.8B, Vn+3.8B }[Vm], [Xn]
+
To be added.
To be added.
@@ -23021,7 +23125,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.8B, Vn+1.8B, Vn+2.8B }[Vm], [Xn]
+
To be added.
To be added.
@@ -23062,7 +23168,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.8B, Vn+1.8B }[Vm], [Xn]
+
To be added.
To be added.
@@ -23103,7 +23211,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }[Vm], [Xn]
+
To be added.
To be added.
@@ -23144,7 +23254,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.2S, Vn+1.2S, Vn+2.2S }[Vm], [Xn]
+
To be added.
To be added.
@@ -23185,7 +23297,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.2S, Vn+1.2S }[Vm], [Xn]
+
To be added.
To be added.
@@ -23226,7 +23340,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.4H, Vn+1.4H, Vn+2.4H, Vn+3.4H }[Vm], [Xn]
+
To be added.
To be added.
@@ -23267,7 +23383,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.4H, Vn+1.4H, Vn+2.4H }[Vm], [Xn]
+
To be added.
To be added.
@@ -23308,7 +23426,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.4H, Vn+1.4H }[Vm], [Xn]
+
To be added.
To be added.
@@ -23349,7 +23469,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD4 { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }[Vm], [Xn]
+
To be added.
To be added.
@@ -23390,7 +23512,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD3 { Vn.2S, Vn+1.2S, Vn+2.2S }[Vm], [Xn]
+
To be added.
To be added.
@@ -23431,7 +23555,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: LD2 { Vn.2S, Vn+1.2S }[Vm], [Xn]
+
To be added.
To be added.
@@ -23909,7 +24035,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.8B, Vn+1.8B }, [Xn]
+
To be added.
To be added.
@@ -23939,7 +24067,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.4H, Vn+1.4H }, [Xn]
+
To be added.
To be added.
@@ -23969,7 +24099,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.2S, Vn+1.2S }, [Xn]
+
To be added.
To be added.
@@ -23999,7 +24131,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.8B, Vn+1.8B }, [Xn]
+
To be added.
To be added.
@@ -24029,7 +24163,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.2S, Vn+1.2S }, [Xn]
+
To be added.
To be added.
@@ -24059,7 +24195,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.4H, Vn+1.4H }, [Xn]
+
To be added.
To be added.
@@ -24089,7 +24227,9 @@
To be added.
- To be added.
+
+ A64: LD2R { Vn.2S, Vn+1.2S }, [Xn]
+
To be added.
To be added.
@@ -24119,7 +24259,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.8B, Vn+1.8B, Vn+2.8B }, [Xn]
+
To be added.
To be added.
@@ -24149,7 +24291,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.4H, Vn+1.4H, Vn+2.4H }, [Xn]
+
To be added.
To be added.
@@ -24179,7 +24323,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.2S, Vn+1.2S, Vn+2.2S }, [Xn]
+
To be added.
To be added.
@@ -24209,7 +24355,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.8B, Vn+1.8B, Vn+2.8B }, [Xn]
+
To be added.
To be added.
@@ -24239,7 +24387,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.2S, Vn+1.2S, Vn+2.2S }, [Xn]
+
To be added.
To be added.
@@ -24269,7 +24419,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.4H, Vn+1.4H, Vn+2.4H }, [Xn]
+
To be added.
To be added.
@@ -24299,7 +24451,9 @@
To be added.
- To be added.
+
+ A64: LD3R { Vn.2S, Vn+1.2S, Vn+2.2S }, [Xn]
+
To be added.
To be added.
@@ -24329,7 +24483,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.8B, Vn+1.8B, Vn+2.8B, Vn+3.8B }, [Xn]
+
To be added.
To be added.
@@ -24359,7 +24515,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.4H, Vn+1.4H, Vn+2.4H, Vn+3.4H }, [Xn]
+
To be added.
To be added.
@@ -24389,7 +24547,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }, [Xn]
+
To be added.
To be added.
@@ -24419,7 +24579,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.8B, Vn+1.8B, Vn+2.8B, Vn+3.8B }, [Xn]
+
To be added.
To be added.
@@ -24449,7 +24611,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }, [Xn]
+
To be added.
To be added.
@@ -24479,7 +24643,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.4H, Vn+1.4H, Vn+2.4H, Vn+3.4H }, [Xn]
+
To be added.
To be added.
@@ -24509,7 +24675,9 @@
To be added.
- To be added.
+
+ A64: LD4R { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }, [Xn]
+
To be added.
To be added.
@@ -58742,7 +58910,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.8B, Vn+1.8B, Vn+2.8B, Vn+3.8B }, [Xn]
+
To be added.
@@ -58774,7 +58944,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.8B, Vn+1.8B, Vn+2.8B }, [Xn]
+
To be added.
@@ -58806,7 +58978,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.8B, Vn+1.8B }, [Xn]
+
To be added.
@@ -58970,7 +59144,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.4H, Vn+1.4H, Vn+2.4H, Vn+3.4H }, [Xn]
+
To be added.
@@ -59002,7 +59178,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.4H, Vn+1.4H, Vn+2.4H }, [Xn]
+
To be added.
@@ -59034,7 +59212,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.4H, Vn+1.4H }, [Xn]
+
To be added.
@@ -59132,7 +59312,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }, [Xn]
+
To be added.
@@ -59164,7 +59346,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2S, Vn+1.2S, Vn+2.2S }, [Xn]
+
To be added.
@@ -59196,7 +59380,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2S, Vn+1.2S }, [Xn]
+
To be added.
@@ -59360,7 +59546,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.8B, Vn+1.8B, Vn+2.8B, Vn+3.8B }, [Xn]
+
To be added.
@@ -59392,7 +59580,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.8B, Vn+1.8B, Vn+2.8B }, [Xn]
+
To be added.
@@ -59424,7 +59614,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.8B, Vn+1.8B }, [Xn]
+
To be added.
@@ -59522,7 +59714,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }, [Xn]
+
To be added.
@@ -59554,7 +59748,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2S, Vn+1.2S, Vn+2.2S }, [Xn]
+
To be added.
@@ -59586,7 +59782,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2S, Vn+1.2S }, [Xn]
+
To be added.
@@ -59684,7 +59882,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.4H, Vn+1.4H, Vn+2.4H, Vn+3.4H }, [Xn]
+
To be added.
@@ -59716,7 +59916,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.4H, Vn+1.4H, Vn+2.4H }, [Xn]
+
To be added.
@@ -59748,7 +59950,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.4H, Vn+1.4H }, [Xn]
+
To be added.
@@ -59846,7 +60050,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }, [Xn]
+
To be added.
@@ -59878,7 +60084,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2S, Vn+1.2S, Vn+2.2S }, [Xn]
+
To be added.
@@ -59910,7 +60118,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST1 { Vn.2S, Vn+1.2S }, [Xn]
+
To be added.
@@ -60101,7 +60311,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vt.8B, Vt+1.8B, Vt+2.8B, Vt+3.8B }[index], [Xn]
+
To be added.
@@ -60142,7 +60354,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vt.8B, Vt+1.8B, Vt+2.8B }[index], [Xn]
+
To be added.
@@ -60183,7 +60397,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vt.8B, Vt+1.8B }[index], [Xn]
+
To be added.
@@ -60350,7 +60566,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vt.4H, Vt+1.4H, Vt+2.4H, Vt+3.4H }[index], [Xn]
+
To be added.
@@ -60391,7 +60609,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vt.4H, Vt+1.4H, Vt+2.4H }[index], [Xn]
+
To be added.
@@ -60432,7 +60652,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vt.4H, Vt+1.4H }[index], [Xn]
+
To be added.
@@ -60557,7 +60779,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vt.2S, Vt+1.2S, Vt+2.2S, Vt+3.2S }[index], [Xn]
+
To be added.
@@ -60598,7 +60822,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vt.2S, Vt+1.2S, Vt+2.2S }[index], [Xn]
+
To be added.
@@ -60639,7 +60865,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vt.2S, Vt+1.2S }[index], [Xn]
+
To be added.
@@ -60806,7 +61034,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vt.8B, Vt+1.8B, Vt+2.8B, Vt+3.8B }[index], [Xn]
+
To be added.
@@ -60847,7 +61077,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vt.8B, Vt+1.8B, Vt+2.8B }[index], [Xn]
+
To be added.
@@ -60888,7 +61120,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vt.8B, Vt+1.8B }[index], [Xn]
+
To be added.
@@ -61013,7 +61247,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vt.2S, Vt+1.2S, Vt+2.2S, Vt+3.2S }[index], [Xn]
+
To be added.
@@ -61054,7 +61290,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vt.2S, Vt+1.2S, Vt+2.2S }[index], [Xn]
+
To be added.
@@ -61095,7 +61333,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vt.2S, Vt+1.2S }[index], [Xn]
+
To be added.
@@ -61220,7 +61460,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vt.4H, Vt+1.4H, Vt+2.4H, Vt+3.4H }[index], [Xn]
+
To be added.
@@ -61261,7 +61503,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vt.4H, Vt+1.4H, Vt+2.4H }[index], [Xn]
+
To be added.
@@ -61302,7 +61546,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vt.4H, Vt+1.4H }[index], [Xn]
+
To be added.
@@ -61427,7 +61673,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vt.2S, Vt+1.2S, Vt+2.2S, Vt+3.2S }[index], [Xn]
+
To be added.
@@ -61468,7 +61716,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vt.2S, Vt+1.2S, Vt+2.2S }[index], [Xn]
+
To be added.
@@ -61509,7 +61759,9 @@
To be added.
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vt.2S, Vt+1.2S }[index], [Xn]
+
To be added.
@@ -61583,7 +61835,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.8B, Vn+1.8B, Vn+2.8B, Vn+3.8B }, [Xn]
+
To be added.
@@ -61615,7 +61869,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.8B, Vn+1.8B, Vn+2.8B }, [Xn]
+
To be added.
@@ -61647,7 +61903,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.8B, Vn+1.8B }, [Xn]
+
To be added.
@@ -61679,7 +61937,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.4H, Vn+1.4H, Vn+2.4H, Vn+3.4H }, [Xn]
+
To be added.
@@ -61711,7 +61971,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.4H, Vn+1.4H, Vn+2.4H }, [Xn]
+
To be added.
@@ -61743,7 +62005,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.4H, Vn+1.4H }, [Xn]
+
To be added.
@@ -61775,7 +62039,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }, [Xn]
+
To be added.
@@ -61807,7 +62073,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.2S, Vn+1.2S, Vn+2.2S }, [Xn]
+
To be added.
@@ -61839,7 +62107,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.2S, Vn+1.2S }, [Xn]
+
To be added.
@@ -61871,7 +62141,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.8B, Vn+1.8B, Vn+2.8B, Vn+3.8B }, [Xn]
+
To be added.
@@ -61903,7 +62175,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.8B, Vn+1.8B, Vn+2.8B }, [Xn]
+
To be added.
@@ -61935,7 +62209,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.8B, Vn+1.8B }, [Xn]
+
To be added.
@@ -61967,7 +62243,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }, [Xn]
+
To be added.
@@ -61999,7 +62277,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.2S, Vn+1.2S, Vn+2.2S }, [Xn]
+
To be added.
@@ -62031,7 +62311,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.2S, Vn+1.2S }, [Xn]
+
To be added.
@@ -62063,7 +62345,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.4H, Vn+1.4H, Vn+2.4H, Vn+3.4H }, [Xn]
+
To be added.
@@ -62095,7 +62379,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.4H, Vn+1.4H, Vn+2.4H }, [Xn]
+
To be added.
@@ -62127,7 +62413,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.4H, Vn+1.4H }, [Xn]
+
To be added.
@@ -62159,7 +62447,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST4 { Vn.2S, Vn+1.2S, Vn+2.2S, Vn+3.2S }, [Xn]
+
To be added.
@@ -62191,7 +62481,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST3 { Vn.2S, Vn+1.2S, Vn+2.2S }, [Xn]
+
To be added.
@@ -62223,7 +62515,9 @@
To be added.
To be added.
- To be added.
+
+ A64: ST2 { Vn.2S, Vn+1.2S }, [Xn]
+
To be added.
diff --git a/xml/System.Runtime.Intrinsics.Arm/Aes+Arm64.xml b/xml/System.Runtime.Intrinsics.Arm/Aes+Arm64.xml
index 435776a4349..c8c9768b454 100644
--- a/xml/System.Runtime.Intrinsics.Arm/Aes+Arm64.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/Aes+Arm64.xml
@@ -18,7 +18,7 @@
- To be added.
+ Provides access to the ARM AES hardware instructions, which are only available to 64-bit processes, via intrinsics.
To be added.
@@ -42,9 +42,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.Arm/Aes.xml b/xml/System.Runtime.Intrinsics.Arm/Aes.xml
index 90e8eb89c53..ff66224d4fc 100644
--- a/xml/System.Runtime.Intrinsics.Arm/Aes.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/Aes.xml
@@ -151,9 +151,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.Arm/ArmBase+Arm64.xml b/xml/System.Runtime.Intrinsics.Arm/ArmBase+Arm64.xml
index bb395b09e1b..d20e7075c4a 100644
--- a/xml/System.Runtime.Intrinsics.Arm/ArmBase+Arm64.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/ArmBase+Arm64.xml
@@ -18,7 +18,7 @@
- To be added.
+ Provides access to the ARM base hardware instructions, which are only available to 64-bit processes, via intrinsics.
To be added.
@@ -42,9 +42,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.Arm/ArmBase.xml b/xml/System.Runtime.Intrinsics.Arm/ArmBase.xml
index eeca10e3f9f..e363477477c 100644
--- a/xml/System.Runtime.Intrinsics.Arm/ArmBase.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/ArmBase.xml
@@ -48,9 +48,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.Arm/Crc32+Arm64.xml b/xml/System.Runtime.Intrinsics.Arm/Crc32+Arm64.xml
index 2b0dc6b6a7f..d62e7ac1588 100644
--- a/xml/System.Runtime.Intrinsics.Arm/Crc32+Arm64.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/Crc32+Arm64.xml
@@ -18,7 +18,7 @@
- To be added.
+ Provides access to the ARM Crc32 hardware instructions, which are only available to 64-bit processes, via intrinsics.
To be added.
@@ -110,9 +110,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.Arm/Crc32.xml b/xml/System.Runtime.Intrinsics.Arm/Crc32.xml
index 0200218d609..0da83de85f8 100644
--- a/xml/System.Runtime.Intrinsics.Arm/Crc32.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/Crc32.xml
@@ -258,9 +258,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.Arm/Dp+Arm64.xml b/xml/System.Runtime.Intrinsics.Arm/Dp+Arm64.xml
index dff71cd5eaa..e6a3e3c7bda 100644
--- a/xml/System.Runtime.Intrinsics.Arm/Dp+Arm64.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/Dp+Arm64.xml
@@ -18,7 +18,7 @@
- To be added.
+ Provides access to the ARMv8.2-DotProd hardware instructions, which are only available to 64-bit processes, via intrinsics.
To be added.
@@ -42,9 +42,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.Arm/Dp.xml b/xml/System.Runtime.Intrinsics.Arm/Dp.xml
index aac25e0a37c..dc2cc864a8e 100644
--- a/xml/System.Runtime.Intrinsics.Arm/Dp.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/Dp.xml
@@ -516,9 +516,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.Arm/Rdm+Arm64.xml b/xml/System.Runtime.Intrinsics.Arm/Rdm+Arm64.xml
index 53edfc79b29..5f942450263 100644
--- a/xml/System.Runtime.Intrinsics.Arm/Rdm+Arm64.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/Rdm+Arm64.xml
@@ -18,7 +18,7 @@
- To be added.
+ Provides access to the ARMv8.1-RDMA hardware instructions, which are only available to 64-bit processes, via intrinsics.
To be added.
@@ -42,9 +42,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.Arm/Rdm.xml b/xml/System.Runtime.Intrinsics.Arm/Rdm.xml
index 5c831875b45..c5f6f52f337 100644
--- a/xml/System.Runtime.Intrinsics.Arm/Rdm.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/Rdm.xml
@@ -48,9 +48,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.Arm/Sha1+Arm64.xml b/xml/System.Runtime.Intrinsics.Arm/Sha1+Arm64.xml
index a33a8993df5..db04903cc35 100644
--- a/xml/System.Runtime.Intrinsics.Arm/Sha1+Arm64.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/Sha1+Arm64.xml
@@ -18,7 +18,7 @@
- To be added.
+ Provides access to the ARM SHA1 hardware instructions, which are only available to 64-bit processes, via intrinsics.
To be added.
@@ -42,9 +42,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.Arm/Sha1.xml b/xml/System.Runtime.Intrinsics.Arm/Sha1.xml
index e00f5257e1d..95b005e11df 100644
--- a/xml/System.Runtime.Intrinsics.Arm/Sha1.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/Sha1.xml
@@ -192,9 +192,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.Arm/Sha256+Arm64.xml b/xml/System.Runtime.Intrinsics.Arm/Sha256+Arm64.xml
index 4559c3a60e7..b670dbe4349 100644
--- a/xml/System.Runtime.Intrinsics.Arm/Sha256+Arm64.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/Sha256+Arm64.xml
@@ -18,7 +18,7 @@
- To be added.
+ Provides access to the ARM SHA256 hardware instructions, which are only available to 64-bit processes, via intrinsics.
To be added.
@@ -42,9 +42,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.Arm/Sha256.xml b/xml/System.Runtime.Intrinsics.Arm/Sha256.xml
index 15f7f30706c..f902f09f30b 100644
--- a/xml/System.Runtime.Intrinsics.Arm/Sha256.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/Sha256.xml
@@ -122,9 +122,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.Arm/Sve+Arm64.xml b/xml/System.Runtime.Intrinsics.Arm/Sve+Arm64.xml
index eeb7cb9675d..b9bc997b4bb 100644
--- a/xml/System.Runtime.Intrinsics.Arm/Sve+Arm64.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/Sve+Arm64.xml
@@ -14,7 +14,7 @@
- To be added.
+ Provides access to the ARM SVE hardware instructions, which are only available to 64-bit processes, via intrinsics.
To be added.
@@ -34,9 +34,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.Arm/Sve.xml b/xml/System.Runtime.Intrinsics.Arm/Sve.xml
index 3b8f4f924db..7b5566655f9 100644
--- a/xml/System.Runtime.Intrinsics.Arm/Sve.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/Sve.xml
@@ -24,7 +24,7 @@
- To be added.
+ Provides access to the ARM SVE hardware instructions via intrinsics.
To be added.
@@ -48,7 +48,13 @@
To be added.
- To be added.
+
+ svfloat64_t svabs[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ svfloat64_t svabs[_f64]_x(svbool_t pg, svfloat64_t op)
+ svfloat64_t svabs[_f64]_z(svbool_t pg, svfloat64_t op)
+ FABS Ztied.D, Pg/M, Zop.D
+ FABS Ztied.D, Pg/M, Ztied.D
+
To be added.
To be added.
@@ -73,7 +79,12 @@
To be added.
- To be added.
+
+ svint16_t svabs[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+ svint16_t svabs[_s16]_x(svbool_t pg, svint16_t op)
+ svint16_t svabs[_s16]_z(svbool_t pg, svint16_t op)
+ ABS Zresult.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -98,7 +109,12 @@
To be added.
- To be added.
+
+ svint32_t svabs[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ svint32_t svabs[_s32]_x(svbool_t pg, svint32_t op)
+ svint32_t svabs[_s32]_z(svbool_t pg, svint32_t op)
+ ABS Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -123,7 +139,12 @@
To be added.
- To be added.
+
+ svint64_t svabs[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ svint64_t svabs[_s64]_x(svbool_t pg, svint64_t op)
+ svint64_t svabs[_s64]_z(svbool_t pg, svint64_t op)
+ ABS Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -148,7 +169,12 @@
To be added.
- To be added.
+
+ svint8_t svabs[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op)
+ svint8_t svabs[_s8]_x(svbool_t pg, svint8_t op)
+ svint8_t svabs[_s8]_z(svbool_t pg, svint8_t op)
+ ABS Zresult.B, Pg/M, Zop.B
+
To be added.
To be added.
@@ -173,7 +199,12 @@
To be added.
- To be added.
+
+ svfloat32_t svabs[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ svfloat32_t svabs[_f32]_x(svbool_t pg, svfloat32_t op)
+ svfloat32_t svabs[_f32]_z(svbool_t pg, svfloat32_t op)
+ FABS Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -200,7 +231,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svacgt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FACGT Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -227,7 +261,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svacgt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FACGT Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -254,7 +291,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svacge[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FACGE Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -281,7 +321,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svacge[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FACGE Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -308,7 +351,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svaclt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FACLT Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -335,7 +381,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svaclt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FACLT Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -362,7 +411,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svacle[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FACLE Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -389,7 +441,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svacle[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FACLE Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -416,7 +471,11 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svabd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svabd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ UABD Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+
To be added.
To be added.
@@ -443,7 +502,12 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svabd[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svabd[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svabd[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FABD Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -470,7 +534,12 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svabd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svabd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svabd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ SABD Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+
To be added.
To be added.
@@ -497,7 +566,12 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svabd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svabd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svabd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ SABD Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -524,7 +598,12 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svabd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svabd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svabd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ SABD Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -551,7 +630,12 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svabd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svabd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svabd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ SABD Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+
To be added.
To be added.
@@ -578,7 +662,12 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svabd[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svabd[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svabd[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FABD Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -605,7 +694,12 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svabd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svabd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svabd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ UABD Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+
To be added.
To be added.
@@ -632,7 +726,12 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svabd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svabd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svabd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ UABD Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -659,7 +758,12 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svabd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svabd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svabd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ UABD Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -686,7 +790,12 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ ADD Zresult.B, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -713,7 +822,12 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svadd[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svadd[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svadd[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FADD Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -740,7 +854,12 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svadd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svadd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svadd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ ADD Zresult.H, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -767,7 +886,12 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svadd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svadd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svadd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ ADD Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -794,7 +918,12 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svadd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svadd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svadd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ ADD Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -821,7 +950,12 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svadd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svadd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svadd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ ADD Zresult.B, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -848,7 +982,12 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svadd[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svadd[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svadd[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FADD Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -875,7 +1014,12 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ ADD Zresult.H, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -902,7 +1046,12 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svadd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ ADD Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -929,7 +1078,12 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ ADD Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -954,7 +1108,10 @@
To be added.
- To be added.
+
+ uint64_t svaddv[_u8](svbool_t pg, svuint8_t op)
+ UADDV Dresult, Pg, Zop.B
+
To be added.
To be added.
@@ -979,7 +1136,10 @@
To be added.
- To be added.
+
+ float64_t svaddv[_f64](svbool_t pg, svfloat64_t op)
+ FADDV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -1004,7 +1164,10 @@
To be added.
- To be added.
+
+ int64_t svaddv[_s16](svbool_t pg, svint16_t op)
+ SADDV Dresult, Pg, Zop.H
+
To be added.
To be added.
@@ -1029,7 +1192,10 @@
To be added.
- To be added.
+
+ int64_t svaddv[_s32](svbool_t pg, svint32_t op)
+ SADDV Dresult, Pg, Zop.S
+
To be added.
To be added.
@@ -1054,7 +1220,10 @@
To be added.
- To be added.
+
+ int64_t svaddv[_s64](svbool_t pg, svint64_t op)
+ UADDV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -1079,7 +1248,10 @@
To be added.
- To be added.
+
+ int64_t svaddv[_s8](svbool_t pg, svint8_t op)
+ SADDV Dresult, Pg, Zop.B
+
To be added.
To be added.
@@ -1104,7 +1276,10 @@
To be added.
- To be added.
+
+ float32_t svaddv[_f32](svbool_t pg, svfloat32_t op)
+ FADDV Sresult, Pg, Zop.S
+
To be added.
To be added.
@@ -1129,7 +1304,10 @@
To be added.
- To be added.
+
+ uint64_t svaddv[_u16](svbool_t pg, svuint16_t op)
+ UADDV Dresult, Pg, Zop.H
+
To be added.
To be added.
@@ -1154,7 +1332,10 @@
To be added.
- To be added.
+
+ uint64_t svaddv[_u32](svbool_t pg, svuint32_t op)
+ UADDV Dresult, Pg, Zop.S
+
To be added.
To be added.
@@ -1179,7 +1360,10 @@
To be added.
- To be added.
+
+ uint64_t svaddv[_u64](svbool_t pg, svuint64_t op)
+ UADDV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -1215,7 +1399,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svcadd[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, uint64_t imm_rotation)
+ svfloat64_t svcadd[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, uint64_t imm_rotation)
+ svfloat64_t svcadd[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, uint64_t imm_rotation)
+ FCADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D, #imm_rotation
+
To be added.
To be added.
@@ -1251,7 +1440,13 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svcadd[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, uint64_t imm_rotation)
+ svfloat32_t svcadd[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, uint64_t imm_rotation)
+ svfloat32_t svcadd[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, uint64_t imm_rotation)
+ FCADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S, #imm_rotation
+
To be added.
To be added.
@@ -1278,7 +1473,10 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svqadd[_u8](svuint8_t op1, svuint8_t op2)
+ UQADD Zresult.B, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -1305,7 +1503,10 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svqadd[_s16](svint16_t op1, svint16_t op2)
+ SQADD Zresult.H, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -1332,7 +1533,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svqadd[_s32](svint32_t op1, svint32_t op2)
+ SQADD Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -1359,7 +1563,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svqadd[_s64](svint64_t op1, svint64_t op2)
+ SQADD Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -1386,7 +1593,10 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svqadd[_s8](svint8_t op1, svint8_t op2)
+ SQADD Zresult.B, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -1413,7 +1623,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svqadd[_u16](svuint16_t op1, svuint16_t op2)
+ UQADD Zresult.H, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -1440,7 +1653,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svqadd[_u32](svuint32_t op1, svuint32_t op2)
+ UQADD Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -1467,7 +1683,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svqadd[_u64](svuint64_t op1, svuint64_t op2)
+ UQADD Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -1494,7 +1713,10 @@
To be added.
To be added.
- To be added.
+
+ float64_t svadda[_f64](svbool_t pg, float64_t initial, svfloat64_t op)
+ FADDA Dtied, Pg, Dtied, Zop.D
+
To be added.
To be added.
@@ -1521,7 +1743,10 @@
To be added.
To be added.
- To be added.
+
+ float32_t svadda[_f32](svbool_t pg, float32_t initial, svfloat32_t op)
+ FADDA Stied, Pg, Stied, Zop.S
+
To be added.
To be added.
@@ -1548,7 +1773,14 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svand[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svand[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svand[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ AND Zresult.D, Zop1.D, Zop2.D
+ svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ AND Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -1575,7 +1807,14 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svand[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svand[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svand[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ AND Zresult.D, Zop1.D, Zop2.D
+ svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ AND Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -1602,7 +1841,14 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svand[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svand[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svand[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ AND Zresult.D, Zop1.D, Zop2.D
+ svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ AND Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -1629,7 +1875,14 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svand[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svand[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svand[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ AND Zresult.D, Zop1.D, Zop2.D
+ svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ AND Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -1656,7 +1909,14 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svand[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svand[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svand[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ AND Zresult.D, Zop1.D, Zop2.D
+ svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ AND Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -1683,7 +1943,14 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svand[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svand[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svand[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ AND Zresult.D, Zop1.D, Zop2.D
+ svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ AND Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -1710,7 +1977,14 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svand[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svand[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svand[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ AND Zresult.D, Zop1.D, Zop2.D
+ svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ AND Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -1737,7 +2011,14 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svand[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svand[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svand[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ AND Zresult.D, Zop1.D, Zop2.D
+ svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ AND Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -1762,7 +2043,10 @@
To be added.
- To be added.
+
+ uint8_t svandv[_u8](svbool_t pg, svuint8_t op)
+ ANDV Bresult, Pg, Zop.B
+
To be added.
To be added.
@@ -1787,7 +2071,10 @@
To be added.
- To be added.
+
+ int16_t svandv[_s16](svbool_t pg, svint16_t op)
+ ANDV Hresult, Pg, Zop.H
+
To be added.
To be added.
@@ -1812,7 +2099,10 @@
To be added.
- To be added.
+
+ int32_t svandv[_s32](svbool_t pg, svint32_t op)
+ ANDV Sresult, Pg, Zop.S
+
To be added.
To be added.
@@ -1837,7 +2127,10 @@
To be added.
- To be added.
+
+ int64_t svandv[_s64](svbool_t pg, svint64_t op)
+ ANDV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -1862,7 +2155,10 @@
To be added.
- To be added.
+
+ int8_t svandv[_s8](svbool_t pg, svint8_t op)
+ ANDV Bresult, Pg, Zop.B
+
To be added.
To be added.
@@ -1887,7 +2183,10 @@
To be added.
- To be added.
+
+ uint16_t svandv[_u16](svbool_t pg, svuint16_t op)
+ ANDV Hresult, Pg, Zop.H
+
To be added.
To be added.
@@ -1912,7 +2211,10 @@
To be added.
- To be added.
+
+ uint32_t svandv[_u32](svbool_t pg, svuint32_t op)
+ ANDV Sresult, Pg, Zop.S
+
To be added.
To be added.
@@ -1937,7 +2239,10 @@
To be added.
- To be added.
+
+ uint64_t svandv[_u64](svbool_t pg, svuint64_t op)
+ ANDV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -1964,7 +2269,15 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svbic[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svbic[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svbic[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ BIC Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ BIC Zresult.D, Zop1.D, Zop2.D
+ svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BIC Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -1991,7 +2304,15 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svbic[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svbic[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svbic[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ BIC Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ BIC Zresult.D, Zop1.D, Zop2.D
+ svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BIC Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -2018,7 +2339,15 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svbic[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svbic[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svbic[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ BIC Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ BIC Zresult.D, Zop1.D, Zop2.D
+ svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BIC Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -2045,7 +2374,15 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svbic[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svbic[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svbic[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ BIC Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ BIC Zresult.D, Zop1.D, Zop2.D
+ svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BIC Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -2072,7 +2409,15 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svbic[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svbic[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svbic[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ BIC Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ BIC Zresult.D, Zop1.D, Zop2.D
+ svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BIC Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -2099,7 +2444,15 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svbic[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svbic[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svbic[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ BIC Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ BIC Zresult.D, Zop1.D, Zop2.D
+ svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BIC Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -2126,7 +2479,15 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svbic[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svbic[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svbic[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ BIC Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ BIC Zresult.D, Zop1.D, Zop2.D
+ svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BIC Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -2153,7 +2514,15 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svbic[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svbic[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svbic[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ BIC Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ BIC Zresult.D, Zop1.D, Zop2.D
+ svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BIC Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -2178,7 +2547,12 @@
To be added.
- To be added.
+
+ svuint8_t svcnot[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op)
+ svuint8_t svcnot[_u8]_x(svbool_t pg, svuint8_t op)
+ svuint8_t svcnot[_u8]_z(svbool_t pg, svuint8_t op)
+ CNOT Zresult.B, Pg/M, Zop.B
+
To be added.
To be added.
@@ -2203,7 +2577,12 @@
To be added.
- To be added.
+
+ svint16_t svcnot[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+ svint16_t svcnot[_s16]_x(svbool_t pg, svint16_t op)
+ svint16_t svcnot[_s16]_z(svbool_t pg, svint16_t op)
+ CNOT Zresult.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -2228,7 +2607,12 @@
To be added.
- To be added.
+
+ svint32_t svcnot[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ svint32_t svcnot[_s32]_x(svbool_t pg, svint32_t op)
+ svint32_t svcnot[_s32]_z(svbool_t pg, svint32_t op)
+ CNOT Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -2253,7 +2637,12 @@
To be added.
- To be added.
+
+ svint64_t svcnot[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ svint64_t svcnot[_s64]_x(svbool_t pg, svint64_t op)
+ svint64_t svcnot[_s64]_z(svbool_t pg, svint64_t op)
+ CNOT Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -2278,7 +2667,12 @@
To be added.
- To be added.
+
+ svint8_t svcnot[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op)
+ svint8_t svcnot[_s8]_x(svbool_t pg, svint8_t op)
+ svint8_t svcnot[_s8]_z(svbool_t pg, svint8_t op)
+ CNOT Zresult.B, Pg/M, Zop.B
+
To be added.
To be added.
@@ -2303,7 +2697,12 @@
To be added.
- To be added.
+
+ svuint16_t svcnot[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op)
+ svuint16_t svcnot[_u16]_x(svbool_t pg, svuint16_t op)
+ svuint16_t svcnot[_u16]_z(svbool_t pg, svuint16_t op)
+ CNOT Zresult.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -2328,7 +2727,12 @@
To be added.
- To be added.
+
+ svuint32_t svcnot[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ svuint32_t svcnot[_u32]_x(svbool_t pg, svuint32_t op)
+ svuint32_t svcnot[_u32]_z(svbool_t pg, svuint32_t op)
+ CNOT Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -2353,7 +2757,12 @@
To be added.
- To be added.
+
+ svuint64_t svcnot[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ svuint64_t svcnot[_u64]_x(svbool_t pg, svuint64_t op)
+ svuint64_t svcnot[_u64]_z(svbool_t pg, svuint64_t op)
+ CNOT Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -2380,7 +2789,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svcompact[_f64](svbool_t pg, svfloat64_t op)
+ COMPACT Zresult.D, Pg, Zop.D
+
To be added.
To be added.
@@ -2407,7 +2819,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svcompact[_s32](svbool_t pg, svint32_t op)
+ COMPACT Zresult.S, Pg, Zop.S
+
To be added.
To be added.
@@ -2434,7 +2849,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svcompact[_s64](svbool_t pg, svint64_t op)
+ COMPACT Zresult.D, Pg, Zop.D
+
To be added.
To be added.
@@ -2461,7 +2879,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svcompact[_f32](svbool_t pg, svfloat32_t op)
+ COMPACT Zresult.S, Pg, Zop.S
+
To be added.
To be added.
@@ -2488,7 +2909,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svcompact[_u32](svbool_t pg, svuint32_t op)
+ COMPACT Zresult.S, Pg, Zop.S
+
To be added.
To be added.
@@ -2515,7 +2939,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svcompact[_u64](svbool_t pg, svuint64_t op)
+ COMPACT Zresult.D, Pg, Zop.D
+
To be added.
To be added.
@@ -2542,7 +2969,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpeq[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ CMPEQ Presult.B, Pg/Z, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -2569,7 +2999,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpeq[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FCMEQ Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -2596,7 +3029,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpeq[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ CMPEQ Presult.H, Pg/Z, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -2623,7 +3059,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpeq_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2)
+ CMPEQ Presult.H, Pg/Z, Zop1.H, Zop2.D
+
To be added.
To be added.
@@ -2650,7 +3089,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpeq[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+ CMPEQ Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -2677,7 +3119,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpeq_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2)
+ CMPEQ Presult.S, Pg/Z, Zop1.S, Zop2.D
+
To be added.
To be added.
@@ -2704,7 +3149,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpeq[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+ CMPEQ Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -2731,7 +3179,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpeq_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2)
+ CMPEQ Presult.B, Pg/Z, Zop1.B, Zop2.D
+
To be added.
To be added.
@@ -2758,7 +3209,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpeq[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ CMPEQ Presult.B, Pg/Z, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -2785,7 +3239,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpeq[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FCMEQ Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -2812,7 +3269,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpeq[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ CMPEQ Presult.H, Pg/Z, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -2839,7 +3299,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpeq[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+ CMPEQ Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -2866,7 +3329,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpeq[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+ CMPEQ Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -2893,7 +3359,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpgt[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ CMPHI Presult.B, Pg/Z, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -2920,7 +3389,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpgt_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2)
+ CMPHI Presult.B, Pg/Z, Zop1.B, Zop2.D
+
To be added.
To be added.
@@ -2947,7 +3419,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpgt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FCMGT Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -2974,7 +3449,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpgt[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ CMPGT Presult.H, Pg/Z, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -3001,7 +3479,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpgt_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2)
+ CMPGT Presult.H, Pg/Z, Zop1.H, Zop2.D
+
To be added.
To be added.
@@ -3028,7 +3509,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpgt[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+ CMPGT Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -3055,7 +3539,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpgt_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2)
+ CMPGT Presult.S, Pg/Z, Zop1.S, Zop2.D
+
To be added.
To be added.
@@ -3082,7 +3569,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpgt[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+ CMPGT Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -3109,7 +3599,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpgt_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2)
+ CMPGT Presult.B, Pg/Z, Zop1.B, Zop2.D
+
To be added.
To be added.
@@ -3136,7 +3629,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpgt[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ CMPGT Presult.B, Pg/Z, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -3163,7 +3659,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpgt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FCMGT Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -3190,7 +3689,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpgt[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ CMPHI Presult.H, Pg/Z, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -3217,7 +3719,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpgt_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2)
+ CMPHI Presult.H, Pg/Z, Zop1.H, Zop2.D
+
To be added.
To be added.
@@ -3244,7 +3749,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpgt[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+ CMPHI Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -3271,7 +3779,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpgt_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2)
+ CMPHI Presult.S, Pg/Z, Zop1.S, Zop2.D
+
To be added.
To be added.
@@ -3298,7 +3809,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpgt[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+ CMPHI Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -3325,7 +3839,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpge[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ CMPHS Presult.B, Pg/Z, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -3352,7 +3869,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpge_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2)
+ CMPHS Presult.B, Pg/Z, Zop1.B, Zop2.D
+
To be added.
To be added.
@@ -3379,7 +3899,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpge[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FCMGE Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -3406,7 +3929,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpge[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ CMPGE Presult.H, Pg/Z, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -3433,7 +3959,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpge_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2)
+ CMPGE Presult.H, Pg/Z, Zop1.H, Zop2.D
+
To be added.
To be added.
@@ -3460,7 +3989,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpge[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+ CMPGE Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -3487,7 +4019,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpge_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2)
+ CMPGE Presult.S, Pg/Z, Zop1.S, Zop2.D
+
To be added.
To be added.
@@ -3514,7 +4049,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpge[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+ CMPGE Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -3541,7 +4079,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpge_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2)
+ CMPGE Presult.B, Pg/Z, Zop1.B, Zop2.D
+
To be added.
To be added.
@@ -3568,7 +4109,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpge[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ CMPGE Presult.B, Pg/Z, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -3595,7 +4139,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpge[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FCMGE Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -3622,7 +4169,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpge[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ CMPHS Presult.H, Pg/Z, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -3649,7 +4199,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpge_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2)
+ CMPHS Presult.H, Pg/Z, Zop1.H, Zop2.D
+
To be added.
To be added.
@@ -3676,7 +4229,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpge[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+ CMPHS Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -3703,7 +4259,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpge_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2)
+ CMPHS Presult.S, Pg/Z, Zop1.S, Zop2.D
+
To be added.
To be added.
@@ -3730,7 +4289,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpge[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+ CMPHS Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -3757,7 +4319,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmplt[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ CMPHI Presult.B, Pg/Z, Zop2.B, Zop1.B
+
To be added.
To be added.
@@ -3784,7 +4349,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmplt_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2)
+ CMPLO Presult.B, Pg/Z, Zop1.B, Zop2.D
+
To be added.
To be added.
@@ -3811,7 +4379,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmplt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FCMGT Presult.D, Pg/Z, Zop2.D, Zop1.D
+
To be added.
To be added.
@@ -3838,7 +4409,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmplt[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ CMPGT Presult.H, Pg/Z, Zop2.H, Zop1.H
+
To be added.
To be added.
@@ -3865,7 +4439,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmplt_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2)
+ CMPLT Presult.H, Pg/Z, Zop1.H, Zop2.D
+
To be added.
To be added.
@@ -3892,7 +4469,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmplt[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+ CMPGT Presult.S, Pg/Z, Zop2.S, Zop1.S
+
To be added.
To be added.
@@ -3919,7 +4499,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmplt_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2)
+ CMPLT Presult.S, Pg/Z, Zop1.S, Zop2.D
+
To be added.
To be added.
@@ -3946,7 +4529,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmplt[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+ CMPGT Presult.D, Pg/Z, Zop2.D, Zop1.D
+
To be added.
To be added.
@@ -3973,7 +4559,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmplt_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2)
+ CMPLT Presult.B, Pg/Z, Zop1.B, Zop2.D
+
To be added.
To be added.
@@ -4000,7 +4589,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmplt[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ CMPGT Presult.B, Pg/Z, Zop2.B, Zop1.B
+
To be added.
To be added.
@@ -4027,7 +4619,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmplt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FCMGT Presult.S, Pg/Z, Zop2.S, Zop1.S
+
To be added.
To be added.
@@ -4054,7 +4649,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmplt[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ CMPHI Presult.H, Pg/Z, Zop2.H, Zop1.H
+
To be added.
To be added.
@@ -4081,7 +4679,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmplt_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2)
+ CMPLO Presult.H, Pg/Z, Zop1.H, Zop2.D
+
To be added.
To be added.
@@ -4108,7 +4709,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmplt[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+ CMPHI Presult.S, Pg/Z, Zop2.S, Zop1.S
+
To be added.
To be added.
@@ -4135,7 +4739,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmplt_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2)
+ CMPLO Presult.S, Pg/Z, Zop1.S, Zop2.D
+
To be added.
To be added.
@@ -4162,7 +4769,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmplt[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+ CMPHI Presult.D, Pg/Z, Zop2.D, Zop1.D
+
To be added.
To be added.
@@ -4189,7 +4799,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmple[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ CMPHS Presult.B, Pg/Z, Zop2.B, Zop1.B
+
To be added.
To be added.
@@ -4216,7 +4829,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmple_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2)
+ CMPLS Presult.B, Pg/Z, Zop1.B, Zop2.D
+
To be added.
To be added.
@@ -4243,7 +4859,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmple[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FCMGE Presult.D, Pg/Z, Zop2.D, Zop1.D
+
To be added.
To be added.
@@ -4270,7 +4889,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmple[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ CMPGE Presult.H, Pg/Z, Zop2.H, Zop1.H
+
To be added.
To be added.
@@ -4297,7 +4919,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmple_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2)
+ CMPLE Presult.H, Pg/Z, Zop1.H, Zop2.D
+
To be added.
To be added.
@@ -4324,7 +4949,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmple[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+ CMPGE Presult.S, Pg/Z, Zop2.S, Zop1.S
+
To be added.
To be added.
@@ -4351,7 +4979,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmple_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2)
+ CMPLE Presult.S, Pg/Z, Zop1.S, Zop2.D
+
To be added.
To be added.
@@ -4378,7 +5009,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmple[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+ CMPGE Presult.D, Pg/Z, Zop2.D, Zop1.D
+
To be added.
To be added.
@@ -4405,7 +5039,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmple_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2)
+ CMPLE Presult.B, Pg/Z, Zop1.B, Zop2.D
+
To be added.
To be added.
@@ -4432,7 +5069,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmple[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ CMPGE Presult.B, Pg/Z, Zop2.B, Zop1.B
+
To be added.
To be added.
@@ -4459,7 +5099,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmple[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FCMGE Presult.S, Pg/Z, Zop2.S, Zop1.S
+
To be added.
To be added.
@@ -4486,7 +5129,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmple[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ CMPHS Presult.H, Pg/Z, Zop2.H, Zop1.H
+
To be added.
To be added.
@@ -4513,7 +5159,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmple_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2)
+ CMPLS Presult.H, Pg/Z, Zop1.H, Zop2.D
+
To be added.
To be added.
@@ -4540,7 +5189,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmple[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+ CMPHS Presult.S, Pg/Z, Zop2.S, Zop1.S
+
To be added.
To be added.
@@ -4567,7 +5219,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmple_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2)
+ CMPLS Presult.S, Pg/Z, Zop1.S, Zop2.D
+
To be added.
To be added.
@@ -4594,7 +5249,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmple[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+ CMPHS Presult.D, Pg/Z, Zop2.D, Zop1.D
+
To be added.
To be added.
@@ -4621,7 +5279,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpne[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ CMPNE Presult.B, Pg/Z, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -4648,7 +5309,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpne[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FCMNE Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -4675,7 +5339,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpne[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ CMPNE Presult.H, Pg/Z, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -4702,7 +5369,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpne_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2)
+ CMPNE Presult.H, Pg/Z, Zop1.H, Zop2.D
+
To be added.
To be added.
@@ -4729,7 +5399,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpne[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+ CMPNE Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -4756,7 +5429,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpne_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2)
+ CMPNE Presult.S, Pg/Z, Zop1.S, Zop2.D
+
To be added.
To be added.
@@ -4783,7 +5459,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpne[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+ CMPNE Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -4810,7 +5489,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpne_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2)
+ CMPNE Presult.B, Pg/Z, Zop1.B, Zop2.D
+
To be added.
To be added.
@@ -4837,7 +5519,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpne[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ CMPNE Presult.B, Pg/Z, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -4864,7 +5549,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpne[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FCMNE Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -4891,7 +5579,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpne[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ CMPNE Presult.H, Pg/Z, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -4918,7 +5609,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpne[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+ CMPNE Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -4945,7 +5639,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpne[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+ CMPNE Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -4972,7 +5669,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpuo[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FCMUO Presult.D, Pg/Z, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -4999,7 +5699,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svcmpuo[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FCMUO Presult.S, Pg/Z, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -5026,7 +5729,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svadrh[_u32base]_[s32]index(svuint32_t bases, svint32_t indices)
+ ADR Zresult.S, [Zbases.S, Zindices.S, LSL #1]
+
To be added.
To be added.
@@ -5053,7 +5759,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svadrh[_u32base]_[u32]index(svuint32_t bases, svuint32_t indices)
+ ADR Zresult.S, [Zbases.S, Zindices.S, LSL #1]
+
To be added.
To be added.
@@ -5080,7 +5789,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svadrh[_u64base]_[s64]index(svuint64_t bases, svint64_t indices)
+ ADR Zresult.D, [Zbases.D, Zindices.D, LSL #1]
+
To be added.
To be added.
@@ -5107,7 +5819,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svadrh[_u64base]_[u64]index(svuint64_t bases, svuint64_t indices)
+ ADR Zresult.D, [Zbases.D, Zindices.D, LSL #1]
+
To be added.
To be added.
@@ -5134,7 +5849,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svadrw[_u32base]_[s32]index(svuint32_t bases, svint32_t indices)
+ ADR Zresult.S, [Zbases.S, Zindices.S, LSL #2]
+
To be added.
To be added.
@@ -5161,7 +5879,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svadrw[_u32base]_[u32]index(svuint32_t bases, svuint32_t indices)
+ ADR Zresult.S, [Zbases.S, Zindices.S, LSL #2]
+
To be added.
To be added.
@@ -5188,7 +5909,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svadrw[_u64base]_[s64]index(svuint64_t bases, svint64_t indices)
+ ADR Zresult.D, [Zbases.D, Zindices.D, LSL #2]
+
To be added.
To be added.
@@ -5215,7 +5939,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svadrw[_u64base]_[u64]index(svuint64_t bases, svuint64_t indices)
+ ADR Zresult.D, [Zbases.D, Zindices.D, LSL #2]
+
To be added.
To be added.
@@ -5242,7 +5969,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svadrd[_u32base]_[s32]index(svuint32_t bases, svint32_t indices)
+ ADR Zresult.S, [Zbases.S, Zindices.S, LSL #3]
+
To be added.
To be added.
@@ -5269,7 +5999,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svadrd[_u32base]_[u32]index(svuint32_t bases, svuint32_t indices)
+ ADR Zresult.S, [Zbases.S, Zindices.S, LSL #3]
+
To be added.
To be added.
@@ -5296,7 +6029,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svadrd[_u64base]_[s64]index(svuint64_t bases, svint64_t indices)
+ ADR Zresult.D, [Zbases.D, Zindices.D, LSL #3]
+
To be added.
To be added.
@@ -5323,7 +6059,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svadrd[_u64base]_[u64]index(svuint64_t bases, svuint64_t indices)
+ ADR Zresult.D, [Zbases.D, Zindices.D, LSL #3]
+
To be added.
To be added.
@@ -5350,7 +6089,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svadrb[_u32base]_[s32]offset(svuint32_t bases, svint32_t offsets)
+ ADR Zresult.S, [Zbases.S, Zoffsets.S]
+
To be added.
To be added.
@@ -5377,7 +6119,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svadrb[_u32base]_[u32]offset(svuint32_t bases, svuint32_t offsets)
+ ADR Zresult.S, [Zbases.S, Zoffsets.S]
+
To be added.
To be added.
@@ -5404,7 +6149,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svadrb[_u64base]_[s64]offset(svuint64_t bases, svint64_t offsets)
+ ADR Zresult.D, [Zbases.D, Zoffsets.D]
+
To be added.
To be added.
@@ -5431,7 +6179,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svadrb[_u64base]_[u64]offset(svuint64_t bases, svuint64_t offsets)
+ ADR Zresult.D, [Zbases.D, Zoffsets.D]
+
To be added.
To be added.
@@ -5460,7 +6211,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint8_t svclasta[_n_u8](svbool_t pg, uint8_t fallback, svuint8_t data)
+ CLASTA Wtied, Pg, Wtied, Zdata.B
+
To be added.
To be added.
@@ -5489,7 +6243,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint8_t svclasta[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+ CLASTA Btied, Pg, Btied, Zdata.B
+
To be added.
To be added.
@@ -5518,7 +6275,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ float64_t svclasta[_n_f64](svbool_t pg, float64_t fallback, svfloat64_t data)
+ CLASTA Dtied, Pg, Dtied, Zdata.D
+
To be added.
To be added.
@@ -5547,7 +6307,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svclasta[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+ CLASTA Dtied, Pg, Dtied, Zdata.D
+
To be added.
To be added.
@@ -5576,7 +6339,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int16_t svclasta[_n_s16](svbool_t pg, int16_t fallback, svint16_t data)
+ CLASTA Wtied, Pg, Wtied, Zdata.H
+
To be added.
To be added.
@@ -5605,7 +6371,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint16_t svclasta[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+ CLASTA Htied, Pg, Htied, Zdata.H
+
To be added.
To be added.
@@ -5634,7 +6403,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int32_t svclasta[_n_s32](svbool_t pg, int32_t fallback, svint32_t data)
+ CLASTA Wtied, Pg, Wtied, Zdata.S
+
To be added.
To be added.
@@ -5663,7 +6435,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svclasta[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+ CLASTA Stied, Pg, Stied, Zdata.S
+
To be added.
To be added.
@@ -5692,7 +6467,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int64_t svclasta[_n_s64](svbool_t pg, int64_t fallback, svint64_t data)
+ CLASTA Xtied, Pg, Xtied, Zdata.D
+
To be added.
To be added.
@@ -5721,7 +6499,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svclasta[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+ CLASTA Dtied, Pg, Dtied, Zdata.D
+
To be added.
To be added.
@@ -5750,7 +6531,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint8_t svclasta[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+ CLASTA Btied, Pg, Btied, Zdata.B
+
To be added.
To be added.
@@ -5779,7 +6563,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int8_t svclasta[_n_s8](svbool_t pg, int8_t fallback, svint8_t data)
+ CLASTA Wtied, Pg, Wtied, Zdata.B
+
To be added.
To be added.
@@ -5808,7 +6595,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svclasta[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+ CLASTA Stied, Pg, Stied, Zdata.S
+
To be added.
To be added.
@@ -5837,7 +6627,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ float32_t svclasta[_n_f32](svbool_t pg, float32_t fallback, svfloat32_t data)
+ CLASTA Stied, Pg, Stied, Zdata.S
+
To be added.
To be added.
@@ -5866,7 +6659,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint16_t svclasta[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+ CLASTA Htied, Pg, Htied, Zdata.H
+
To be added.
To be added.
@@ -5895,7 +6691,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint16_t svclasta[_n_u16](svbool_t pg, uint16_t fallback, svuint16_t data)
+ CLASTA Wtied, Pg, Wtied, Zdata.H
+
To be added.
To be added.
@@ -5924,7 +6723,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svclasta[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+ CLASTA Stied, Pg, Stied, Zdata.S
+
To be added.
To be added.
@@ -5953,7 +6755,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint32_t svclasta[_n_u32](svbool_t pg, uint32_t fallback, svuint32_t data)
+ CLASTA Wtied, Pg, Wtied, Zdata.S
+
To be added.
To be added.
@@ -5982,7 +6787,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svclasta[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+ CLASTA Dtied, Pg, Dtied, Zdata.D
+
To be added.
To be added.
@@ -6011,7 +6819,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint64_t svclasta[_n_u64](svbool_t pg, uint64_t fallback, svuint64_t data)
+ CLASTA Xtied, Pg, Xtied, Zdata.D
+
To be added.
To be added.
@@ -6040,7 +6851,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint8_t svclasta[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+ CLASTA Ztied.B, Pg, Ztied.B, Zdata.B
+
To be added.
To be added.
@@ -6069,7 +6883,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svclasta[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+ CLASTA Ztied.D, Pg, Ztied.D, Zdata.D
+
To be added.
To be added.
@@ -6098,7 +6915,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint16_t svclasta[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+ CLASTA Ztied.H, Pg, Ztied.H, Zdata.H
+
To be added.
To be added.
@@ -6127,7 +6947,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svclasta[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+ CLASTA Ztied.S, Pg, Ztied.S, Zdata.S
+
To be added.
To be added.
@@ -6156,7 +6979,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svclasta[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+ CLASTA Ztied.D, Pg, Ztied.D, Zdata.D
+
To be added.
To be added.
@@ -6185,7 +7011,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint8_t svclasta[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+ CLASTA Ztied.B, Pg, Ztied.B, Zdata.B
+
To be added.
To be added.
@@ -6214,7 +7043,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svclasta[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+ CLASTA Ztied.S, Pg, Ztied.S, Zdata.S
+
To be added.
To be added.
@@ -6243,7 +7075,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint16_t svclasta[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+ CLASTA Ztied.H, Pg, Ztied.H, Zdata.H
+
To be added.
To be added.
@@ -6272,7 +7107,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svclasta[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+ CLASTA Ztied.S, Pg, Ztied.S, Zdata.S
+
To be added.
To be added.
@@ -6301,7 +7139,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svclasta[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+ CLASTA Ztied.D, Pg, Ztied.D, Zdata.D
+
To be added.
To be added.
@@ -6330,7 +7171,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint8_t svclastb[_n_u8](svbool_t pg, uint8_t fallback, svuint8_t data)
+ CLASTB Wtied, Pg, Wtied, Zdata.B
+
To be added.
To be added.
@@ -6359,7 +7203,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint8_t svclastb[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+ CLASTB Btied, Pg, Btied, Zdata.B
+
To be added.
To be added.
@@ -6388,7 +7235,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ float64_t svclastb[_n_f64](svbool_t pg, float64_t fallback, svfloat64_t data)
+ CLASTB Dtied, Pg, Dtied, Zdata.D
+
To be added.
To be added.
@@ -6417,7 +7267,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svclastb[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+ CLASTB Dtied, Pg, Dtied, Zdata.D
+
To be added.
To be added.
@@ -6446,7 +7299,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int16_t svclastb[_n_s16](svbool_t pg, int16_t fallback, svint16_t data)
+ CLASTB Wtied, Pg, Wtied, Zdata.H
+
To be added.
To be added.
@@ -6475,7 +7331,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint16_t svclastb[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+ CLASTB Htied, Pg, Htied, Zdata.H
+
To be added.
To be added.
@@ -6504,7 +7363,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int32_t svclastb[_n_s32](svbool_t pg, int32_t fallback, svint32_t data)
+ CLASTB Wtied, Pg, Wtied, Zdata.S
+
To be added.
To be added.
@@ -6533,7 +7395,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svclastb[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+ CLASTB Stied, Pg, Stied, Zdata.S
+
To be added.
To be added.
@@ -6562,7 +7427,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int64_t svclastb[_n_s64](svbool_t pg, int64_t fallback, svint64_t data)
+ CLASTB Xtied, Pg, Xtied, Zdata.D
+
To be added.
To be added.
@@ -6591,7 +7459,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svclastb[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+ CLASTB Dtied, Pg, Dtied, Zdata.D
+
To be added.
To be added.
@@ -6620,7 +7491,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint8_t svclastb[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+ CLASTB Btied, Pg, Btied, Zdata.B
+
To be added.
To be added.
@@ -6649,7 +7523,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int8_t svclastb[_n_s8](svbool_t pg, int8_t fallback, svint8_t data)
+ CLASTB Wtied, Pg, Wtied, Zdata.B
+
To be added.
To be added.
@@ -6678,7 +7555,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svclastb[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+ CLASTB Stied, Pg, Stied, Zdata.S
+
To be added.
To be added.
@@ -6707,7 +7587,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ float32_t svclastb[_n_f32](svbool_t pg, float32_t fallback, svfloat32_t data)
+ CLASTB Stied, Pg, Stied, Zdata.S
+
To be added.
To be added.
@@ -6736,7 +7619,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint16_t svclastb[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+ CLASTB Htied, Pg, Htied, Zdata.H
+
To be added.
To be added.
@@ -6765,7 +7651,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint16_t svclastb[_n_u16](svbool_t pg, uint16_t fallback, svuint16_t data)
+ CLASTB Wtied, Pg, Wtied, Zdata.H
+
To be added.
To be added.
@@ -6794,7 +7683,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svclastb[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+ CLASTB Stied, Pg, Stied, Zdata.S
+
To be added.
To be added.
@@ -6823,7 +7715,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint32_t svclastb[_n_u32](svbool_t pg, uint32_t fallback, svuint32_t data)
+ CLASTB Wtied, Pg, Wtied, Zdata.S
+
To be added.
To be added.
@@ -6852,7 +7747,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svclastb[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+ CLASTB Dtied, Pg, Dtied, Zdata.D
+
To be added.
To be added.
@@ -6881,7 +7779,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint64_t svclastb[_n_u64](svbool_t pg, uint64_t fallback, svuint64_t data)
+ CLASTB Xtied, Pg, Xtied, Zdata.D
+
To be added.
To be added.
@@ -6910,7 +7811,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint8_t svclastb[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+ CLASTB Ztied.B, Pg, Ztied.B, Zdata.B
+
To be added.
To be added.
@@ -6939,7 +7843,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svclastb[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+ CLASTB Ztied.D, Pg, Ztied.D, Zdata.D
+
To be added.
To be added.
@@ -6968,7 +7875,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint16_t svclastb[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+ CLASTB Ztied.H, Pg, Ztied.H, Zdata.H
+
To be added.
To be added.
@@ -6997,7 +7907,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svclastb[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+ CLASTB Ztied.S, Pg, Ztied.S, Zdata.S
+
To be added.
To be added.
@@ -7026,7 +7939,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svclastb[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+ CLASTB Ztied.D, Pg, Ztied.D, Zdata.D
+
To be added.
To be added.
@@ -7055,7 +7971,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint8_t svclastb[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+ CLASTB Ztied.B, Pg, Ztied.B, Zdata.B
+
To be added.
To be added.
@@ -7084,7 +8003,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svclastb[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+ CLASTB Ztied.S, Pg, Ztied.S, Zdata.S
+
To be added.
To be added.
@@ -7113,7 +8035,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint16_t svclastb[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+ CLASTB Ztied.H, Pg, Ztied.H, Zdata.H
+
To be added.
To be added.
@@ -7142,7 +8067,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svclastb[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+ CLASTB Ztied.S, Pg, Ztied.S, Zdata.S
+
To be added.
To be added.
@@ -7171,7 +8099,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svclastb[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+ CLASTB Ztied.D, Pg, Ztied.D, Zdata.D
+
To be added.
To be added.
@@ -7200,7 +8131,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint8_t svsel[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+ SEL Zresult.B, Pg, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -7229,7 +8164,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svsel[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ SEL Zresult.D, Pg, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -7258,7 +8196,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint16_t svsel[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+ SEL Zresult.H, Pg, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -7287,7 +8229,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svsel[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+ svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+ SEL Zresult.S, Pg, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -7316,7 +8262,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svsel[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+ svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+ SEL Zresult.D, Pg, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -7345,7 +8295,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint8_t svsel[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+ SEL Zresult.B, Pg, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -7374,7 +8328,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svsel[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ SEL Zresult.S, Pg, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -7403,7 +8360,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint16_t svsel[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+ SEL Zresult.H, Pg, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -7432,7 +8393,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svsel[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+ SEL Zresult.S, Pg, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -7461,7 +8426,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svsel[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+ SEL Zresult.D, Pg, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -7486,7 +8455,12 @@
To be added.
- To be added.
+
+ svfloat64_t svcvt_f64[_s32]_m(svfloat64_t inactive, svbool_t pg, svint32_t op)
+ svfloat64_t svcvt_f64[_s32]_x(svbool_t pg, svint32_t op)
+ svfloat64_t svcvt_f64[_s32]_z(svbool_t pg, svint32_t op)
+ SCVTF Zresult.D, Pg/M, Zop.S
+
To be added.
To be added.
@@ -7511,7 +8485,12 @@
To be added.
- To be added.
+
+ svfloat64_t svcvt_f64[_s64]_m(svfloat64_t inactive, svbool_t pg, svint64_t op)
+ svfloat64_t svcvt_f64[_s64]_x(svbool_t pg, svint64_t op)
+ svfloat64_t svcvt_f64[_s64]_z(svbool_t pg, svint64_t op)
+ SCVTF Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -7536,7 +8515,12 @@
To be added.
- To be added.
+
+ svfloat64_t svcvt_f64[_f32]_m(svfloat64_t inactive, svbool_t pg, svfloat32_t op)
+ svfloat64_t svcvt_f64[_f32]_x(svbool_t pg, svfloat32_t op)
+ svfloat64_t svcvt_f64[_f32]_z(svbool_t pg, svfloat32_t op)
+ FCVT Zresult.D, Pg/M, Zop.S
+
To be added.
To be added.
@@ -7561,7 +8545,12 @@
To be added.
- To be added.
+
+ svfloat64_t svcvt_f64[_u32]_m(svfloat64_t inactive, svbool_t pg, svuint32_t op)
+ svfloat64_t svcvt_f64[_u32]_x(svbool_t pg, svuint32_t op)
+ svfloat64_t svcvt_f64[_u32]_z(svbool_t pg, svuint32_t op)
+ UCVTF Zresult.D, Pg/M, Zop.S
+
To be added.
To be added.
@@ -7586,7 +8575,12 @@
To be added.
- To be added.
+
+ svfloat64_t svcvt_f64[_u64]_m(svfloat64_t inactive, svbool_t pg, svuint64_t op)
+ svfloat64_t svcvt_f64[_u64]_x(svbool_t pg, svuint64_t op)
+ svfloat64_t svcvt_f64[_u64]_z(svbool_t pg, svuint64_t op)
+ UCVTF Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -7611,7 +8605,12 @@
To be added.
- To be added.
+
+ svint32_t svcvt_s32[_f64]_m(svint32_t inactive, svbool_t pg, svfloat64_t op)
+ svint32_t svcvt_s32[_f64]_x(svbool_t pg, svfloat64_t op)
+ svint32_t svcvt_s32[_f64]_z(svbool_t pg, svfloat64_t op)
+ FCVTZS Zresult.S, Pg/M, Zop.D
+
To be added.
To be added.
@@ -7636,7 +8635,12 @@
To be added.
- To be added.
+
+ svint32_t svcvt_s32[_f32]_m(svint32_t inactive, svbool_t pg, svfloat32_t op)
+ svint32_t svcvt_s32[_f32]_x(svbool_t pg, svfloat32_t op)
+ svint32_t svcvt_s32[_f32]_z(svbool_t pg, svfloat32_t op)
+ FCVTZS Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -7661,7 +8665,12 @@
To be added.
- To be added.
+
+ svint64_t svcvt_s64[_f64]_m(svint64_t inactive, svbool_t pg, svfloat64_t op)
+ svint64_t svcvt_s64[_f64]_x(svbool_t pg, svfloat64_t op)
+ svint64_t svcvt_s64[_f64]_z(svbool_t pg, svfloat64_t op)
+ FCVTZS Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -7686,7 +8695,12 @@
To be added.
- To be added.
+
+ svint64_t svcvt_s64[_f32]_m(svint64_t inactive, svbool_t pg, svfloat32_t op)
+ svint64_t svcvt_s64[_f32]_x(svbool_t pg, svfloat32_t op)
+ svint64_t svcvt_s64[_f32]_z(svbool_t pg, svfloat32_t op)
+ FCVTZS Zresult.D, Pg/M, Zop.S
+
To be added.
To be added.
@@ -7711,7 +8725,12 @@
To be added.
- To be added.
+
+ svfloat32_t svcvt_f32[_f64]_m(svfloat32_t inactive, svbool_t pg, svfloat64_t op)
+ svfloat32_t svcvt_f32[_f64]_x(svbool_t pg, svfloat64_t op)
+ svfloat32_t svcvt_f32[_f64]_z(svbool_t pg, svfloat64_t op)
+ FCVT Zresult.S, Pg/M, Zop.D
+
To be added.
To be added.
@@ -7736,7 +8755,12 @@
To be added.
- To be added.
+
+ svfloat32_t svcvt_f32[_s32]_m(svfloat32_t inactive, svbool_t pg, svint32_t op)
+ svfloat32_t svcvt_f32[_s32]_x(svbool_t pg, svint32_t op)
+ svfloat32_t svcvt_f32[_s32]_z(svbool_t pg, svint32_t op)
+ SCVTF Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -7761,7 +8785,12 @@
To be added.
- To be added.
+
+ svfloat32_t svcvt_f32[_s64]_m(svfloat32_t inactive, svbool_t pg, svint64_t op)
+ svfloat32_t svcvt_f32[_s64]_x(svbool_t pg, svint64_t op)
+ svfloat32_t svcvt_f32[_s64]_z(svbool_t pg, svint64_t op)
+ SCVTF Zresult.S, Pg/M, Zop.D
+
To be added.
To be added.
@@ -7786,7 +8815,12 @@
To be added.
- To be added.
+
+ svfloat32_t svcvt_f32[_u32]_m(svfloat32_t inactive, svbool_t pg, svuint32_t op)
+ svfloat32_t svcvt_f32[_u32]_x(svbool_t pg, svuint32_t op)
+ svfloat32_t svcvt_f32[_u32]_z(svbool_t pg, svuint32_t op)
+ UCVTF Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -7811,7 +8845,12 @@
To be added.
- To be added.
+
+ svfloat32_t svcvt_f32[_u64]_m(svfloat32_t inactive, svbool_t pg, svuint64_t op)
+ svfloat32_t svcvt_f32[_u64]_x(svbool_t pg, svuint64_t op)
+ svfloat32_t svcvt_f32[_u64]_z(svbool_t pg, svuint64_t op)
+ UCVTF Zresult.S, Pg/M, Zop.D
+
To be added.
To be added.
@@ -7836,7 +8875,12 @@
To be added.
- To be added.
+
+ svuint32_t svcvt_u32[_f64]_m(svuint32_t inactive, svbool_t pg, svfloat64_t op)
+ svuint32_t svcvt_u32[_f64]_x(svbool_t pg, svfloat64_t op)
+ svuint32_t svcvt_u32[_f64]_z(svbool_t pg, svfloat64_t op)
+ FCVTZU Zresult.S, Pg/M, Zop.D
+
To be added.
To be added.
@@ -7861,7 +8905,12 @@
To be added.
- To be added.
+
+ svuint32_t svcvt_u32[_f32]_m(svuint32_t inactive, svbool_t pg, svfloat32_t op)
+ svuint32_t svcvt_u32[_f32]_x(svbool_t pg, svfloat32_t op)
+ svuint32_t svcvt_u32[_f32]_z(svbool_t pg, svfloat32_t op)
+ FCVTZU Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -7886,7 +8935,12 @@
To be added.
- To be added.
+
+ svuint64_t svcvt_u64[_f64]_m(svuint64_t inactive, svbool_t pg, svfloat64_t op)
+ svuint64_t svcvt_u64[_f64]_x(svbool_t pg, svfloat64_t op)
+ svuint64_t svcvt_u64[_f64]_z(svbool_t pg, svfloat64_t op)
+ FCVTZU Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -7911,7 +8965,12 @@
To be added.
- To be added.
+
+ svuint64_t svcvt_u64[_f32]_m(svuint64_t inactive, svbool_t pg, svfloat32_t op)
+ svuint64_t svcvt_u64[_f32]_x(svbool_t pg, svfloat32_t op)
+ svuint64_t svcvt_u64[_f32]_z(svbool_t pg, svfloat32_t op)
+ FCVTZU Zresult.D, Pg/M, Zop.S
+
To be added.
To be added.
@@ -7942,7 +9001,10 @@
To be added.
- To be added.
+
+ uint64_t svcnth_pat(enum svpattern pattern)
+ CNTH Xresult, pattern
+
To be added.
To be added.
@@ -7973,7 +9035,10 @@
To be added.
- To be added.
+
+ uint64_t svcntw_pat(enum svpattern pattern)
+ CNTW Xresult, pattern
+
To be added.
To be added.
@@ -8004,7 +9069,10 @@
To be added.
- To be added.
+
+ uint64_t svcntd_pat(enum svpattern pattern)
+ CNTD Xresult, pattern
+
To be added.
To be added.
@@ -8035,7 +9103,10 @@
To be added.
- To be added.
+
+ uint64_t svcntb_pat(enum svpattern pattern)
+ CNTB Xresult, pattern
+
To be added.
To be added.
@@ -8062,7 +9133,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ BRKA Presult.B, Pg/Z, Pop.B
+
To be added.
To be added.
@@ -8089,7 +9163,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ BRKA Presult.B, Pg/Z, Pop.B
+
To be added.
To be added.
@@ -8116,7 +9193,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ BRKA Presult.B, Pg/Z, Pop.B
+
To be added.
To be added.
@@ -8143,7 +9223,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ BRKA Presult.B, Pg/Z, Pop.B
+
To be added.
To be added.
@@ -8170,7 +9253,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ BRKA Presult.B, Pg/Z, Pop.B
+
To be added.
To be added.
@@ -8197,7 +9283,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ BRKA Presult.B, Pg/Z, Pop.B
+
To be added.
To be added.
@@ -8224,7 +9313,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ BRKA Presult.B, Pg/Z, Pop.B
+
To be added.
To be added.
@@ -8251,7 +9343,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ BRKA Presult.B, Pg/Z, Pop.B
+
To be added.
To be added.
@@ -8280,7 +9375,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -8309,7 +9407,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -8338,7 +9439,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -8367,7 +9471,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -8396,7 +9503,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -8425,7 +9535,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -8454,7 +9567,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -8483,7 +9599,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -8510,7 +9629,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ BRKB Presult.B, Pg/Z, Pop.B
+
To be added.
To be added.
@@ -8537,7 +9659,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ BRKB Presult.B, Pg/Z, Pop.B
+
To be added.
To be added.
@@ -8564,7 +9689,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ BRKB Presult.B, Pg/Z, Pop.B
+
To be added.
To be added.
@@ -8591,7 +9719,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ BRKB Presult.B, Pg/Z, Pop.B
+
To be added.
To be added.
@@ -8618,7 +9749,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ BRKB Presult.B, Pg/Z, Pop.B
+
To be added.
To be added.
@@ -8645,7 +9779,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ BRKB Presult.B, Pg/Z, Pop.B
+
To be added.
To be added.
@@ -8672,7 +9809,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ BRKB Presult.B, Pg/Z, Pop.B
+
To be added.
To be added.
@@ -8699,7 +9839,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ BRKB Presult.B, Pg/Z, Pop.B
+
To be added.
To be added.
@@ -8728,7 +9871,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -8757,7 +9903,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -8786,7 +9935,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -8815,7 +9967,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -8844,7 +9999,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -8873,7 +10031,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -8902,7 +10063,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -8931,7 +10095,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -8958,7 +10125,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B
+
To be added.
To be added.
@@ -8985,7 +10155,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B
+
To be added.
To be added.
@@ -9012,7 +10185,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B
+
To be added.
To be added.
@@ -9039,7 +10215,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B
+
To be added.
To be added.
@@ -9066,7 +10245,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B
+
To be added.
To be added.
@@ -9093,7 +10275,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B
+
To be added.
To be added.
@@ -9120,7 +10305,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B
+
To be added.
To be added.
@@ -9147,7 +10335,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B
+
To be added.
To be added.
@@ -9169,7 +10360,10 @@
- To be added.
+
+ svbool_t svpfalse[_b]()
+ PFALSE Presult.B
+
To be added.
To be added.
@@ -9191,7 +10385,10 @@
- To be added.
+
+ svbool_t svpfalse[_b]()
+ PFALSE Presult.B
+
To be added.
To be added.
@@ -9213,7 +10410,10 @@
- To be added.
+
+ svbool_t svpfalse[_b]()
+ PFALSE Presult.B
+
To be added.
To be added.
@@ -9235,7 +10435,10 @@
- To be added.
+
+ svbool_t svpfalse[_b]()
+ PFALSE Presult.B
+
To be added.
To be added.
@@ -9257,7 +10460,10 @@
- To be added.
+
+ svbool_t svpfalse[_b]()
+ PFALSE Presult.B
+
To be added.
To be added.
@@ -9279,7 +10485,10 @@
- To be added.
+
+ svbool_t svpfalse[_b]()
+ PFALSE Presult.B
+
To be added.
To be added.
@@ -9301,7 +10510,10 @@
- To be added.
+
+ svbool_t svpfalse[_b]()
+ PFALSE Presult.B
+
To be added.
To be added.
@@ -9323,7 +10535,10 @@
- To be added.
+
+ svbool_t svpfalse[_b]()
+ PFALSE Presult.B
+
To be added.
To be added.
@@ -9345,7 +10560,10 @@
- To be added.
+
+ svbool_t svpfalse[_b]()
+ PFALSE Presult.B
+
To be added.
To be added.
@@ -9367,7 +10585,10 @@
- To be added.
+
+ svbool_t svpfalse[_b]()
+ PFALSE Presult.B
+
To be added.
To be added.
@@ -9394,7 +10615,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ PFIRST Ptied.B, Pg, Ptied.B
+
To be added.
To be added.
@@ -9421,7 +10645,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ PFIRST Ptied.B, Pg, Ptied.B
+
To be added.
To be added.
@@ -9448,7 +10675,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ PFIRST Ptied.B, Pg, Ptied.B
+
To be added.
To be added.
@@ -9475,7 +10705,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ PFIRST Ptied.B, Pg, Ptied.B
+
To be added.
To be added.
@@ -9502,7 +10735,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ PFIRST Ptied.B, Pg, Ptied.B
+
To be added.
To be added.
@@ -9529,7 +10765,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ PFIRST Ptied.B, Pg, Ptied.B
+
To be added.
To be added.
@@ -9556,7 +10795,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ PFIRST Ptied.B, Pg, Ptied.B
+
To be added.
To be added.
@@ -9583,7 +10825,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ PFIRST Ptied.B, Pg, Ptied.B
+
To be added.
To be added.
@@ -9610,7 +10855,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svpnext_b8(svbool_t pg, svbool_t op)
+ PNEXT Ptied.B, Pg, Ptied.B
+
To be added.
To be added.
@@ -9637,7 +10885,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svpnext_b16(svbool_t pg, svbool_t op)
+ PNEXT Ptied.H, Pg, Ptied.H
+
To be added.
To be added.
@@ -9664,7 +10915,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svpnext_b32(svbool_t pg, svbool_t op)
+ PNEXT Ptied.S, Pg, Ptied.S
+
To be added.
To be added.
@@ -9691,7 +10945,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svpnext_b64(svbool_t pg, svbool_t op)
+ PNEXT Ptied.D, Pg, Ptied.D
+
To be added.
To be added.
@@ -9722,7 +10979,10 @@
To be added.
- To be added.
+
+ svbool_t svptrue_pat_b8(enum svpattern pattern)
+ PTRUE Presult.B, pattern
+
To be added.
To be added.
@@ -9753,7 +11013,10 @@
To be added.
- To be added.
+
+ svbool_t svptrue_pat_b8(enum svpattern pattern)
+ PTRUE Presult.B, pattern
+
To be added.
To be added.
@@ -9784,7 +11047,10 @@
To be added.
- To be added.
+
+ svbool_t svptrue_pat_b8(enum svpattern pattern)
+ PTRUE Presult.B, pattern
+
To be added.
To be added.
@@ -9815,7 +11081,10 @@
To be added.
- To be added.
+
+ svbool_t svptrue_pat_b8(enum svpattern pattern)
+ PTRUE Presult.B, pattern
+
To be added.
To be added.
@@ -9846,7 +11115,10 @@
To be added.
- To be added.
+
+ svbool_t svptrue_pat_b8(enum svpattern pattern)
+ PTRUE Presult.B, pattern
+
To be added.
To be added.
@@ -9877,7 +11149,10 @@
To be added.
- To be added.
+
+ svbool_t svptrue_pat_b8(enum svpattern pattern)
+ PTRUE Presult.B, pattern
+
To be added.
To be added.
@@ -9908,7 +11183,10 @@
To be added.
- To be added.
+
+ svbool_t svptrue_pat_b8(enum svpattern pattern)
+ PTRUE Presult.B, pattern
+
To be added.
To be added.
@@ -9939,7 +11217,10 @@
To be added.
- To be added.
+
+ svbool_t svptrue_pat_b16(enum svpattern pattern)
+ PTRUE Presult.H, pattern
+
To be added.
To be added.
@@ -9970,7 +11251,10 @@
To be added.
- To be added.
+
+ svbool_t svptrue_pat_b32(enum svpattern pattern)
+ PTRUE Presult.S, pattern
+
To be added.
To be added.
@@ -10001,7 +11285,10 @@
To be added.
- To be added.
+
+ svbool_t svptrue_pat_b64(enum svpattern pattern)
+ PTRUE Presult.D, pattern
+
To be added.
To be added.
@@ -10028,7 +11315,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilelt_b16[_s32](int32_t op1, int32_t op2)
+ WHILELT Presult.H, Wop1, Wop2
+
To be added.
To be added.
@@ -10055,7 +11345,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilelt_b16[_s64](int64_t op1, int64_t op2)
+ WHILELT Presult.H, Xop1, Xop2
+
To be added.
To be added.
@@ -10082,7 +11375,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilelt_b16[_u32](uint32_t op1, uint32_t op2)
+ WHILELO Presult.H, Wop1, Wop2
+
To be added.
To be added.
@@ -10109,7 +11405,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilelt_b16[_u64](uint64_t op1, uint64_t op2)
+ WHILELO Presult.H, Xop1, Xop2
+
To be added.
To be added.
@@ -10136,7 +11435,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilelt_b32[_s32](int32_t op1, int32_t op2)
+ WHILELT Presult.S, Wop1, Wop2
+
To be added.
To be added.
@@ -10163,7 +11465,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilelt_b32[_s64](int64_t op1, int64_t op2)
+ WHILELT Presult.S, Xop1, Xop2
+
To be added.
To be added.
@@ -10190,7 +11495,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilelt_b32[_u32](uint32_t op1, uint32_t op2)
+ WHILELO Presult.S, Wop1, Wop2
+
To be added.
To be added.
@@ -10217,7 +11525,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilelt_b32[_u64](uint64_t op1, uint64_t op2)
+ WHILELO Presult.S, Xop1, Xop2
+
To be added.
To be added.
@@ -10244,7 +11555,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilelt_b64[_s32](int32_t op1, int32_t op2)
+ WHILELT Presult.D, Wop1, Wop2
+
To be added.
To be added.
@@ -10271,7 +11585,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilelt_b64[_s64](int64_t op1, int64_t op2)
+ WHILELT Presult.D, Xop1, Xop2
+
To be added.
To be added.
@@ -10298,7 +11615,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilelt_b64[_u32](uint32_t op1, uint32_t op2)
+ WHILELO Presult.D, Wop1, Wop2
+
To be added.
To be added.
@@ -10325,7 +11645,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilelt_b64[_u64](uint64_t op1, uint64_t op2)
+ WHILELO Presult.D, Xop1, Xop2
+
To be added.
To be added.
@@ -10352,7 +11675,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilelt_b8[_s32](int32_t op1, int32_t op2)
+ WHILELT Presult.B, Wop1, Wop2
+
To be added.
To be added.
@@ -10379,7 +11705,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilelt_b8[_s64](int64_t op1, int64_t op2)
+ WHILELT Presult.B, Xop1, Xop2
+
To be added.
To be added.
@@ -10406,7 +11735,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilelt_b8[_u32](uint32_t op1, uint32_t op2)
+ WHILELO Presult.B, Wop1, Wop2
+
To be added.
To be added.
@@ -10433,7 +11765,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilelt_b8[_u64](uint64_t op1, uint64_t op2)
+ WHILELO Presult.B, Xop1, Xop2
+
To be added.
To be added.
@@ -10460,7 +11795,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilele_b16[_s32](int32_t op1, int32_t op2)
+ WHILELE Presult.H, Wop1, Wop2
+
To be added.
To be added.
@@ -10487,7 +11825,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilele_b16[_s64](int64_t op1, int64_t op2)
+ WHILELE Presult.H, Xop1, Xop2
+
To be added.
To be added.
@@ -10514,7 +11855,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilele_b16[_u32](uint32_t op1, uint32_t op2)
+ WHILELS Presult.H, Wop1, Wop2
+
To be added.
To be added.
@@ -10541,7 +11885,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilele_b16[_u64](uint64_t op1, uint64_t op2)
+ WHILELS Presult.H, Xop1, Xop2
+
To be added.
To be added.
@@ -10568,7 +11915,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilele_b32[_s32](int32_t op1, int32_t op2)
+ WHILELE Presult.S, Wop1, Wop2
+
To be added.
To be added.
@@ -10595,7 +11945,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilele_b32[_s64](int64_t op1, int64_t op2)
+ WHILELE Presult.S, Xop1, Xop2
+
To be added.
To be added.
@@ -10622,7 +11975,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilele_b32[_u32](uint32_t op1, uint32_t op2)
+ WHILELS Presult.S, Wop1, Wop2
+
To be added.
To be added.
@@ -10649,7 +12005,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilele_b32[_u64](uint64_t op1, uint64_t op2)
+ WHILELS Presult.S, Xop1, Xop2
+
To be added.
To be added.
@@ -10676,7 +12035,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilele_b64[_s32](int32_t op1, int32_t op2)
+ WHILELE Presult.D, Wop1, Wop2
+
To be added.
To be added.
@@ -10703,7 +12065,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilele_b64[_s64](int64_t op1, int64_t op2)
+ WHILELE Presult.D, Xop1, Xop2
+
To be added.
To be added.
@@ -10730,7 +12095,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilele_b64[_u32](uint32_t op1, uint32_t op2)
+ WHILELS Presult.D, Wop1, Wop2
+
To be added.
To be added.
@@ -10757,7 +12125,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilele_b64[_u64](uint64_t op1, uint64_t op2)
+ WHILELS Presult.D, Xop1, Xop2
+
To be added.
To be added.
@@ -10784,7 +12155,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilele_b8[_s32](int32_t op1, int32_t op2)
+ WHILELE Presult.B, Wop1, Wop2
+
To be added.
To be added.
@@ -10811,7 +12185,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilele_b8[_s64](int64_t op1, int64_t op2)
+ WHILELE Presult.B, Xop1, Xop2
+
To be added.
To be added.
@@ -10838,7 +12215,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilele_b8[_u32](uint32_t op1, uint32_t op2)
+ WHILELS Presult.B, Wop1, Wop2
+
To be added.
To be added.
@@ -10865,7 +12245,10 @@
To be added.
To be added.
- To be added.
+
+ svbool_t svwhilele_b8[_u64](uint64_t op1, uint64_t op2)
+ WHILELS Presult.B, Xop1, Xop2
+
To be added.
To be added.
@@ -10892,7 +12275,12 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svdiv[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svdiv[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svdiv[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FDIV Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -10919,7 +12307,12 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svdiv[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svdiv[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svdiv[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FDIV Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -10948,7 +12341,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svdot[_s32](svint32_t op1, svint8_t op2, svint8_t op3)
+ SDOT Ztied1.S, Zop2.B, Zop3.B
+
To be added.
To be added.
@@ -10977,7 +12373,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svdot[_s64](svint64_t op1, svint16_t op2, svint16_t op3)
+ SDOT Ztied1.D, Zop2.H, Zop3.H
+
To be added.
To be added.
@@ -11006,7 +12405,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svdot[_u32](svuint32_t op1, svuint8_t op2, svuint8_t op3)
+ UDOT Ztied1.S, Zop2.B, Zop3.B
+
To be added.
To be added.
@@ -11035,7 +12437,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svdot[_u64](svuint64_t op1, svuint16_t op2, svuint16_t op3)
+ UDOT Ztied1.D, Zop2.H, Zop3.H
+
To be added.
To be added.
@@ -11073,7 +12478,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svdot_lane[_s32](svint32_t op1, svint8_t op2, svint8_t op3, uint64_t imm_index)
+ SDOT Ztied1.S, Zop2.B, Zop3.B[imm_index]
+
To be added.
To be added.
@@ -11111,7 +12519,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svdot_lane[_s64](svint64_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+ SDOT Ztied1.D, Zop2.H, Zop3.H[imm_index]
+
To be added.
To be added.
@@ -11149,7 +12560,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svdot_lane[_u32](svuint32_t op1, svuint8_t op2, svuint8_t op3, uint64_t imm_index)
+ UDOT Ztied1.S, Zop2.B, Zop3.B[imm_index]
+
To be added.
To be added.
@@ -11187,7 +12601,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svdot_lane[_u64](svuint64_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index)
+ UDOT Ztied1.D, Zop2.H, Zop3.H[imm_index]
+
To be added.
To be added.
@@ -11221,7 +12638,10 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svdup_lane[_u8](svuint8_t data, uint8_t index)
+ DUP Zresult.B, Zdata.B[index]
+
To be added.
To be added.
@@ -11255,7 +12675,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svdup_lane[_f64](svfloat64_t data, uint64_t index)
+ DUP Zresult.D, Zdata.D[index]
+
To be added.
To be added.
@@ -11289,7 +12712,10 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svdup_lane[_s16](svint16_t data, uint16_t index)
+ DUP Zresult.H, Zdata.H[index]
+
To be added.
To be added.
@@ -11323,7 +12749,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svdup_lane[_s32](svint32_t data, uint32_t index)
+ DUP Zresult.S, Zdata.S[index]
+
To be added.
To be added.
@@ -11357,7 +12786,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svdup_lane[_s64](svint64_t data, uint64_t index)
+ DUP Zresult.D, Zdata.D[index]
+
To be added.
To be added.
@@ -11391,7 +12823,10 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svdup_lane[_s8](svint8_t data, uint8_t index)
+ DUP Zresult.B, Zdata.B[index]
+
To be added.
To be added.
@@ -11425,7 +12860,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svdup_lane[_f32](svfloat32_t data, uint32_t index)
+ DUP Zresult.S, Zdata.S[index]
+
To be added.
To be added.
@@ -11459,7 +12897,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svdup_lane[_u16](svuint16_t data, uint16_t index)
+ DUP Zresult.H, Zdata.H[index]
+
To be added.
To be added.
@@ -11493,7 +12934,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svdup_lane[_u32](svuint32_t data, uint32_t index)
+ DUP Zresult.S, Zdata.S[index]
+
To be added.
To be added.
@@ -11527,7 +12971,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svdup_lane[_u64](svuint64_t data, uint64_t index)
+ DUP Zresult.D, Zdata.D[index]
+
To be added.
To be added.
@@ -11563,7 +13010,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint8_t svext[_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3)
+ EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3
+
To be added.
To be added.
@@ -11599,7 +13049,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svext[_f64](svfloat64_t op1, svfloat64_t op2, uint64_t imm3)
+ EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 8
+
To be added.
To be added.
@@ -11635,7 +13088,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint16_t svext[_s16](svint16_t op1, svint16_t op2, uint64_t imm3)
+ EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 2
+
To be added.
To be added.
@@ -11671,7 +13127,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svext[_s32](svint32_t op1, svint32_t op2, uint64_t imm3)
+ EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 4
+
To be added.
To be added.
@@ -11707,7 +13166,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svext[_s64](svint64_t op1, svint64_t op2, uint64_t imm3)
+ EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 8
+
To be added.
To be added.
@@ -11743,7 +13205,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint8_t svext[_s8](svint8_t op1, svint8_t op2, uint64_t imm3)
+ EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3
+
To be added.
To be added.
@@ -11779,7 +13244,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svext[_f32](svfloat32_t op1, svfloat32_t op2, uint64_t imm3)
+ EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 4
+
To be added.
To be added.
@@ -11815,7 +13283,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint16_t svext[_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3)
+ EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 2
+
To be added.
To be added.
@@ -11851,7 +13322,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svext[_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3)
+ EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 4
+
To be added.
To be added.
@@ -11887,7 +13361,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svext[_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3)
+ EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 8
+
To be added.
To be added.
@@ -11912,7 +13389,10 @@
To be added.
- To be added.
+
+ svfloat32_t svexpa[_f32](svuint32_t op)
+ FEXPA Zresult.S, Zop.S
+
To be added.
To be added.
@@ -11937,7 +13417,10 @@
To be added.
- To be added.
+
+ svfloat64_t svexpa[_f64](svuint64_t op)
+ FEXPA Zresult.D, Zop.D
+
To be added.
To be added.
@@ -11966,7 +13449,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svmla[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)
+ svfloat64_t svmla[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)
+ svfloat64_t svmla[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)
+ FMLA Ztied1.D, Pg/M, Zop2.D, Zop3.D
+
To be added.
To be added.
@@ -11995,7 +13483,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svmla[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3)
+ svfloat32_t svmla[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3)
+ svfloat32_t svmla[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3)
+ FMLA Ztied1.S, Pg/M, Zop2.S, Zop3.S
+
To be added.
To be added.
@@ -12033,7 +13526,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svmla_lane[_f64](svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_index)
+ FMLA Ztied1.D, Zop2.D, Zop3.D[imm_index]
+
To be added.
To be added.
@@ -12071,7 +13567,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svmla_lane[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_index)
+ FMLA Ztied1.S, Zop2.S, Zop3.S[imm_index]
+
To be added.
To be added.
@@ -12100,7 +13599,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svnmla[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)
+ svfloat64_t svnmla[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)
+ svfloat64_t svnmla[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)
+ FNMLA Ztied1.D, Pg/M, Zop2.D, Zop3.D
+
To be added.
To be added.
@@ -12129,7 +13633,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svnmla[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3)
+ svfloat32_t svnmla[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3)
+ svfloat32_t svnmla[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3)
+ FNMLA Ztied1.S, Pg/M, Zop2.S, Zop3.S
+
To be added.
To be added.
@@ -12158,7 +13667,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svmls[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)
+ svfloat64_t svmls[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)
+ svfloat64_t svmls[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)
+ FMLS Ztied1.D, Pg/M, Zop2.D, Zop3.D
+
To be added.
To be added.
@@ -12187,7 +13701,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svmls[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3)
+ svfloat32_t svmls[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3)
+ svfloat32_t svmls[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3)
+ FMLS Ztied1.S, Pg/M, Zop2.S, Zop3.S
+
To be added.
To be added.
@@ -12225,7 +13744,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svmls_lane[_f64](svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_index)
+ FMLS Ztied1.D, Zop2.D, Zop3.D[imm_index]
+
To be added.
To be added.
@@ -12263,7 +13785,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svmls_lane[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_index)
+ FMLS Ztied1.S, Zop2.S, Zop3.S[imm_index]
+
To be added.
To be added.
@@ -12292,7 +13817,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svnmls[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)
+ svfloat64_t svnmls[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)
+ svfloat64_t svnmls[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)
+ FNMLS Ztied1.D, Pg/M, Zop2.D, Zop3.D
+
To be added.
To be added.
@@ -12321,7 +13851,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svnmls[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3)
+ svfloat32_t svnmls[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3)
+ svfloat32_t svnmls[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3)
+ FNMLS Ztied1.S, Pg/M, Zop2.S, Zop3.S
+
To be added.
To be added.
@@ -12357,7 +13892,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfh_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op)
+ PRFH op, Pg, [Zbases.S, #0]
+
To be added.
@@ -12392,7 +13930,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfh_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op)
+ PRFH op, Pg, [Zbases.D, #0]
+
To be added.
@@ -12427,7 +13968,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfh_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op)
+ PRFH op, Pg, [Zbases.S, #0]
+
To be added.
@@ -12462,7 +14006,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfh_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op)
+ PRFH op, Pg, [Zbases.D, #0]
+
To be added.
@@ -12498,7 +14045,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfh_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op)
+ PRFH op, Pg, [Xbase, Zindices.S, SXTW #1]
+
To be added.
@@ -12534,7 +14084,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfh_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op)
+ PRFH op, Pg, [Xbase, Zindices.D, LSL #1]
+
To be added.
@@ -12570,7 +14123,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfh_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op)
+ PRFH op, Pg, [Xbase, Zindices.S, UXTW #1]
+
To be added.
@@ -12606,7 +14162,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfh_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op)
+ PRFH op, Pg, [Xbase, Zindices.D, LSL #1]
+
To be added.
@@ -12642,7 +14201,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfh_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op)
+ PRFH op, Pg, [Xbase, Zindices.S, SXTW #1]
+
To be added.
@@ -12678,7 +14240,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfh_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op)
+ PRFH op, Pg, [Xbase, Zindices.D, LSL #1]
+
To be added.
@@ -12714,7 +14279,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfh_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op)
+ PRFH op, Pg, [Xbase, Zindices.S, UXTW #1]
+
To be added.
@@ -12750,7 +14318,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfh_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op)
+ PRFH op, Pg, [Xbase, Zindices.D, LSL #1]
+
To be added.
@@ -12785,7 +14356,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfw_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op)
+ PRFW op, Pg, [Zbases.S, #0]
+
To be added.
@@ -12820,7 +14394,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfw_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op)
+ PRFW op, Pg, [Zbases.D, #0]
+
To be added.
@@ -12855,7 +14432,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfw_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op)
+ PRFW op, Pg, [Zbases.S, #0]
+
To be added.
@@ -12890,7 +14470,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfw_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op)
+ PRFW op, Pg, [Zbases.D, #0]
+
To be added.
@@ -12926,7 +14509,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfw_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op)
+ PRFW op, Pg, [Xbase, Zindices.S, SXTW #2]
+
To be added.
@@ -12962,7 +14548,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfw_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op)
+ PRFW op, Pg, [Xbase, Zindices.D, LSL #2]
+
To be added.
@@ -12998,7 +14587,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfw_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op)
+ PRFW op, Pg, [Xbase, Zindices.S, UXTW #2]
+
To be added.
@@ -13034,7 +14626,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfw_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op)
+ PRFW op, Pg, [Xbase, Zindices.D, LSL #2]
+
To be added.
@@ -13070,7 +14665,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfw_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op)
+ PRFW op, Pg, [Xbase, Zindices.S, SXTW #2]
+
To be added.
@@ -13106,7 +14704,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfw_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op)
+ PRFW op, Pg, [Xbase, Zindices.D, LSL #2]
+
To be added.
@@ -13142,7 +14743,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfw_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op)
+ PRFW op, Pg, [Xbase, Zindices.S, UXTW #2]
+
To be added.
@@ -13178,7 +14782,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfw_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op)
+ PRFW op, Pg, [Xbase, Zindices.D, LSL #2]
+
To be added.
@@ -13213,7 +14820,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfd_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op)
+ PRFD op, Pg, [Zbases.S, #0]
+
To be added.
@@ -13248,7 +14858,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfd_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op)
+ PRFD op, Pg, [Zbases.D, #0]
+
To be added.
@@ -13283,7 +14896,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfd_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op)
+ PRFD op, Pg, [Zbases.S, #0]
+
To be added.
@@ -13318,7 +14934,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfd_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op)
+ PRFD op, Pg, [Zbases.D, #0]
+
To be added.
@@ -13354,7 +14973,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfd_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op)
+ PRFD op, Pg, [Xbase, Zindices.S, SXTW #3]
+
To be added.
@@ -13390,7 +15012,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfd_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op)
+ PRFD op, Pg, [Xbase, Zindices.D, LSL #3]
+
To be added.
@@ -13426,7 +15051,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfd_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op)
+ PRFD op, Pg, [Xbase, Zindices.S, UXTW #3]
+
To be added.
@@ -13462,7 +15090,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfd_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op)
+ PRFD op, Pg, [Xbase, Zindices.D, LSL #3]
+
To be added.
@@ -13498,7 +15129,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfd_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op)
+ PRFD op, Pg, [Xbase, Zindices.S, SXTW #3]
+
To be added.
@@ -13534,7 +15168,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfd_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op)
+ PRFD op, Pg, [Xbase, Zindices.D, LSL #3]
+
To be added.
@@ -13570,7 +15207,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfd_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op)
+ PRFD op, Pg, [Xbase, Zindices.S, UXTW #3]
+
To be added.
@@ -13606,7 +15246,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfd_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op)
+ PRFD op, Pg, [Xbase, Zindices.D, LSL #3]
+
To be added.
@@ -13641,7 +15284,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfb_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op)
+ PRFB op, Pg, [Zbases.S, #0]
+
To be added.
@@ -13676,7 +15322,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfb_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op)
+ PRFB op, Pg, [Zbases.D, #0]
+
To be added.
@@ -13711,7 +15360,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfb_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op)
+ PRFB op, Pg, [Zbases.S, #0]
+
To be added.
@@ -13746,7 +15398,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfb_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op)
+ PRFB op, Pg, [Zbases.D, #0]
+
To be added.
@@ -13782,7 +15437,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfb_gather_[s32]offset(svbool_t pg, const void *base, svint32_t offsets, enum svprfop op)
+ PRFB op, Pg, [Xbase, Zoffsets.S, SXTW]
+
To be added.
@@ -13818,7 +15476,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfb_gather_[s64]offset(svbool_t pg, const void *base, svint64_t offsets, enum svprfop op)
+ PRFB op, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -13854,7 +15515,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfb_gather_[u32]offset(svbool_t pg, const void *base, svuint32_t offsets, enum svprfop op)
+ PRFB op, Pg, [Xbase, Zoffsets.S, UXTW]
+
To be added.
@@ -13890,7 +15554,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfb_gather_[u64]offset(svbool_t pg, const void *base, svuint64_t offsets, enum svprfop op)
+ PRFB op, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -13926,7 +15593,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfb_gather_[s32]offset(svbool_t pg, const void *base, svint32_t offsets, enum svprfop op)
+ PRFB op, Pg, [Xbase, Zoffsets.S, SXTW]
+
To be added.
@@ -13962,7 +15632,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfb_gather_[s64]offset(svbool_t pg, const void *base, svint64_t offsets, enum svprfop op)
+ PRFB op, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -13998,7 +15671,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfb_gather_[u32]offset(svbool_t pg, const void *base, svuint32_t offsets, enum svprfop op)
+ PRFB op, Pg, [Xbase, Zoffsets.S, UXTW]
+
To be added.
@@ -14034,7 +15710,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfb_gather_[u64]offset(svbool_t pg, const void *base, svuint64_t offsets, enum svprfop op)
+ PRFB op, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -14060,7 +15739,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svld1_gather[_u64base]_f64(svbool_t pg, svuint64_t bases)
+ LD1D Zresult.D, Pg/Z, [Zbases.D, #0]
+
To be added.
To be added.
@@ -14087,7 +15769,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svld1_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ LD1D Zresult.D, Pg/Z, [Zbases.D, #0]
+
To be added.
To be added.
@@ -14114,7 +15799,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ LD1D Zresult.D, Pg/Z, [Zbases.D, #0]
+
To be added.
To be added.
@@ -14142,7 +15830,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svld1_gather_[s64]index[_f64](svbool_t pg, const float64_t *base, svint64_t indices)
+ LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]
+
To be added.
To be added.
@@ -14170,7 +15861,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svld1_gather_[u64]index[_f64](svbool_t pg, const float64_t *base, svuint64_t indices)
+ LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]
+
To be added.
To be added.
@@ -14198,7 +15892,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svld1_gather_[s32]index[_s32](svbool_t pg, const int32_t *base, svint32_t indices)
+ LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #2]
+
To be added.
To be added.
@@ -14226,7 +15923,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svld1_gather_[u32]index[_s32](svbool_t pg, const int32_t *base, svuint32_t indices)
+ LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #2]
+
To be added.
To be added.
@@ -14254,7 +15954,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1_gather_[s64]index[_s64](svbool_t pg, const int64_t *base, svint64_t indices)
+ LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]
+
To be added.
To be added.
@@ -14282,7 +15985,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1_gather_[u64]index[_s64](svbool_t pg, const int64_t *base, svuint64_t indices)
+ LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]
+
To be added.
To be added.
@@ -14310,7 +16016,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svld1_gather_[s32]index[_f32](svbool_t pg, const float32_t *base, svint32_t indices)
+ LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #2]
+
To be added.
To be added.
@@ -14338,7 +16047,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svld1_gather_[u32]index[_f32](svbool_t pg, const float32_t *base, svuint32_t indices)
+ LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #2]
+
To be added.
To be added.
@@ -14366,7 +16078,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1_gather_[s32]index[_u32](svbool_t pg, const uint32_t *base, svint32_t indices)
+ LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #2]
+
To be added.
To be added.
@@ -14394,7 +16109,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1_gather_[u32]index[_u32](svbool_t pg, const uint32_t *base, svuint32_t indices)
+ LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #2]
+
To be added.
To be added.
@@ -14422,7 +16140,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1_gather_[s64]index[_u64](svbool_t pg, const uint64_t *base, svint64_t indices)
+ LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]
+
To be added.
To be added.
@@ -14450,7 +16171,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1_gather_[u64]index[_u64](svbool_t pg, const uint64_t *base, svuint64_t indices)
+ LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]
+
To be added.
To be added.
@@ -14477,7 +16201,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svld1ub_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ LD1B Zresult.D, Pg/Z, [Zbases.D, #0]
+
To be added.
To be added.
@@ -14504,7 +16231,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1ub_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ LD1B Zresult.D, Pg/Z, [Zbases.D, #0]
+
To be added.
To be added.
@@ -14532,7 +16262,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svld1ub_gather_[s32]offset_s32(svbool_t pg, const uint8_t *base, svint32_t offsets)
+ LD1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+
To be added.
To be added.
@@ -14560,7 +16293,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svld1ub_gather_[u32]offset_s32(svbool_t pg, const uint8_t *base, svuint32_t offsets)
+ LD1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+
To be added.
To be added.
@@ -14588,7 +16324,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1ub_gather_[s64]offset_s64(svbool_t pg, const uint8_t *base, svint64_t offsets)
+ LD1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -14616,7 +16355,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1ub_gather_[u64]offset_s64(svbool_t pg, const uint8_t *base, svuint64_t offsets)
+ LD1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -14644,7 +16386,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1ub_gather_[s32]offset_u32(svbool_t pg, const uint8_t *base, svint32_t offsets)
+ LD1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+
To be added.
To be added.
@@ -14672,7 +16417,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1ub_gather_[u32]offset_u32(svbool_t pg, const uint8_t *base, svuint32_t offsets)
+ LD1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+
To be added.
To be added.
@@ -14700,7 +16448,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1ub_gather_[s64]offset_u64(svbool_t pg, const uint8_t *base, svint64_t offsets)
+ LD1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -14728,7 +16479,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1ub_gather_[u64]offset_u64(svbool_t pg, const uint8_t *base, svuint64_t offsets)
+ LD1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -14755,7 +16509,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svld1sh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ LD1SH Zresult.D, Pg/Z, [Zbases.D, #0]
+
To be added.
To be added.
@@ -14782,7 +16539,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1sh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ LD1SH Zresult.D, Pg/Z, [Zbases.D, #0]
+
To be added.
To be added.
@@ -14810,7 +16570,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svld1sh_gather_[s32]index_s32(svbool_t pg, const int16_t *base, svint32_t indices)
+ LD1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]
+
To be added.
To be added.
@@ -14838,7 +16601,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svld1sh_gather_[u32]index_s32(svbool_t pg, const int16_t *base, svuint32_t indices)
+ LD1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]
+
To be added.
To be added.
@@ -14866,7 +16632,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1sh_gather_[s64]index_s64(svbool_t pg, const int16_t *base, svint64_t indices)
+ LD1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+
To be added.
To be added.
@@ -14894,7 +16663,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1sh_gather_[u64]index_s64(svbool_t pg, const int16_t *base, svuint64_t indices)
+ LD1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+
To be added.
To be added.
@@ -14922,7 +16694,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1sh_gather_[s32]index_u32(svbool_t pg, const int16_t *base, svint32_t indices)
+ LD1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]
+
To be added.
To be added.
@@ -14950,7 +16725,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1sh_gather_[u32]index_u32(svbool_t pg, const int16_t *base, svuint32_t indices)
+ LD1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]
+
To be added.
To be added.
@@ -14978,7 +16756,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1sh_gather_[s64]index_u64(svbool_t pg, const int16_t *base, svint64_t indices)
+ LD1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+
To be added.
To be added.
@@ -15006,7 +16787,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1sh_gather_[u64]index_u64(svbool_t pg, const int16_t *base, svuint64_t indices)
+ LD1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+
To be added.
To be added.
@@ -15034,7 +16818,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svld1sh_gather_[s32]offset_s32(svbool_t pg, const int16_t *base, svint32_t offsets)
+ LD1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+
To be added.
To be added.
@@ -15062,7 +16849,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svld1sh_gather_[u32]offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets)
+ LD1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+
To be added.
To be added.
@@ -15090,7 +16880,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1sh_gather_[s64]offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets)
+ LD1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -15118,7 +16911,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1sh_gather_[u64]offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets)
+ LD1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -15146,7 +16942,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1sh_gather_[s32]offset_u32(svbool_t pg, const int16_t *base, svint32_t offsets)
+ LD1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+
To be added.
To be added.
@@ -15174,7 +16973,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1sh_gather_[u32]offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets)
+ LD1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+
To be added.
To be added.
@@ -15202,7 +17004,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1sh_gather_[s64]offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets)
+ LD1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -15230,7 +17035,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1sh_gather_[u64]offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets)
+ LD1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -15257,7 +17065,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svld1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ LD1SW Zresult.D, Pg/Z, [Zbases.D, #0]
+
To be added.
To be added.
@@ -15284,7 +17095,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ LD1SW Zresult.D, Pg/Z, [Zbases.D, #0]
+
To be added.
To be added.
@@ -15312,7 +17126,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices)
+ LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+
To be added.
To be added.
@@ -15340,7 +17157,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices)
+ LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+
To be added.
To be added.
@@ -15368,7 +17188,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices)
+ LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+
To be added.
To be added.
@@ -15396,7 +17219,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices)
+ LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+
To be added.
To be added.
@@ -15424,7 +17250,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets)
+ LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -15452,7 +17281,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets)
+ LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -15480,7 +17312,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets)
+ LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -15508,7 +17343,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets)
+ LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -15535,7 +17373,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svld1sb_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ LD1SB Zresult.D, Pg/Z, [Zbases.D, #0]
+
To be added.
To be added.
@@ -15562,7 +17403,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1sb_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ LD1SB Zresult.D, Pg/Z, [Zbases.D, #0]
+
To be added.
To be added.
@@ -15590,7 +17434,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svld1sb_gather_[s32]offset_s32(svbool_t pg, const int8_t *base, svint32_t offsets)
+ LD1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+
To be added.
To be added.
@@ -15618,7 +17465,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svld1sb_gather_[u32]offset_s32(svbool_t pg, const int8_t *base, svuint32_t offsets)
+ LD1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+
To be added.
To be added.
@@ -15646,7 +17496,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1sb_gather_[s64]offset_s64(svbool_t pg, const int8_t *base, svint64_t offsets)
+ LD1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -15674,7 +17527,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1sb_gather_[u64]offset_s64(svbool_t pg, const int8_t *base, svuint64_t offsets)
+ LD1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -15702,7 +17558,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1sb_gather_[s32]offset_u32(svbool_t pg, const int8_t *base, svint32_t offsets)
+ LD1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+
To be added.
To be added.
@@ -15730,7 +17589,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1sb_gather_[u32]offset_u32(svbool_t pg, const int8_t *base, svuint32_t offsets)
+ LD1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+
To be added.
To be added.
@@ -15758,7 +17620,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1sb_gather_[s64]offset_u64(svbool_t pg, const int8_t *base, svint64_t offsets)
+ LD1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -15786,7 +17651,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1sb_gather_[u64]offset_u64(svbool_t pg, const int8_t *base, svuint64_t offsets)
+ LD1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -15814,7 +17682,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svld1uh_gather_[s32]offset_s32(svbool_t pg, const uint16_t *base, svint32_t offsets)
+ LD1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+
To be added.
To be added.
@@ -15842,7 +17713,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svld1uh_gather_[u32]offset_s32(svbool_t pg, const uint16_t *base, svuint32_t offsets)
+ LD1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+
To be added.
To be added.
@@ -15870,7 +17744,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1uh_gather_[s64]offset_s64(svbool_t pg, const uint16_t *base, svint64_t offsets)
+ LD1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -15898,7 +17775,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1uh_gather_[u64]offset_s64(svbool_t pg, const uint16_t *base, svuint64_t offsets)
+ LD1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -15926,7 +17806,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1uh_gather_[s32]offset_u32(svbool_t pg, const uint16_t *base, svint32_t offsets)
+ LD1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+
To be added.
To be added.
@@ -15954,7 +17837,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1uh_gather_[u32]offset_u32(svbool_t pg, const uint16_t *base, svuint32_t offsets)
+ LD1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+
To be added.
To be added.
@@ -15982,7 +17868,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1uh_gather_[s64]offset_u64(svbool_t pg, const uint16_t *base, svint64_t offsets)
+ LD1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -16010,7 +17899,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1uh_gather_[u64]offset_u64(svbool_t pg, const uint16_t *base, svuint64_t offsets)
+ LD1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -16037,7 +17929,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svld1uh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ LD1H Zresult.D, Pg/Z, [Zbases.D, #0]
+
To be added.
To be added.
@@ -16064,7 +17959,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1uh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ LD1H Zresult.D, Pg/Z, [Zbases.D, #0]
+
To be added.
To be added.
@@ -16092,7 +17990,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svld1uh_gather_[s32]index_s32(svbool_t pg, const uint16_t *base, svint32_t indices)
+ LD1H Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]
+
To be added.
To be added.
@@ -16120,7 +18021,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svld1uh_gather_[u32]index_s32(svbool_t pg, const uint16_t *base, svuint32_t indices)
+ LD1H Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]
+
To be added.
To be added.
@@ -16148,7 +18052,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1uh_gather_[s64]index_s64(svbool_t pg, const uint16_t *base, svint64_t indices)
+ LD1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+
To be added.
To be added.
@@ -16176,7 +18083,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1uh_gather_[u64]index_s64(svbool_t pg, const uint16_t *base, svuint64_t indices)
+ LD1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+
To be added.
To be added.
@@ -16204,7 +18114,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1uh_gather_[s32]index_u32(svbool_t pg, const uint16_t *base, svint32_t indices)
+ LD1H Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]
+
To be added.
To be added.
@@ -16232,7 +18145,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1uh_gather_[u32]index_u32(svbool_t pg, const uint16_t *base, svuint32_t indices)
+ LD1H Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]
+
To be added.
To be added.
@@ -16260,7 +18176,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1uh_gather_[s64]index_u64(svbool_t pg, const uint16_t *base, svint64_t indices)
+ LD1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+
To be added.
To be added.
@@ -16288,7 +18207,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1uh_gather_[u64]index_u64(svbool_t pg, const uint16_t *base, svuint64_t indices)
+ LD1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+
To be added.
To be added.
@@ -16316,7 +18238,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets)
+ LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -16344,7 +18269,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets)
+ LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -16372,7 +18300,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets)
+ LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -16400,7 +18331,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets)
+ LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -16428,7 +18362,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets)
+ LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -16456,7 +18393,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets)
+ LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -16484,7 +18424,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets)
+ LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -16512,7 +18455,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets)
+ LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -16539,7 +18485,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svld1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ LD1W Zresult.D, Pg/Z, [Zbases.D, #0]
+
To be added.
To be added.
@@ -16566,7 +18515,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ LD1W Zresult.D, Pg/Z, [Zbases.D, #0]
+
To be added.
To be added.
@@ -16594,7 +18546,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices)
+ LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+
To be added.
To be added.
@@ -16622,7 +18577,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices)
+ LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+
To be added.
To be added.
@@ -16650,7 +18608,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices)
+ LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+
To be added.
To be added.
@@ -16678,7 +18639,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices)
+ LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+
To be added.
To be added.
@@ -16706,7 +18670,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices)
+ LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+
To be added.
To be added.
@@ -16734,7 +18701,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices)
+ LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+
To be added.
To be added.
@@ -16762,7 +18732,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices)
+ LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+
To be added.
To be added.
@@ -16790,7 +18763,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices)
+ LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+
To be added.
To be added.
@@ -16818,7 +18794,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svld1_gather_[s64]offset[_f64](svbool_t pg, const float64_t *base, svint64_t offsets)
+ LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -16846,7 +18825,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svld1_gather_[u64]offset[_f64](svbool_t pg, const float64_t *base, svuint64_t offsets)
+ LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -16874,7 +18856,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svld1_gather_[s32]offset[_s32](svbool_t pg, const int32_t *base, svint32_t offsets)
+ LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+
To be added.
To be added.
@@ -16902,7 +18887,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svld1_gather_[u32]offset[_s32](svbool_t pg, const int32_t *base, svuint32_t offsets)
+ LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+
To be added.
To be added.
@@ -16930,7 +18918,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1_gather_[s64]offset[_s64](svbool_t pg, const int64_t *base, svint64_t offsets)
+ LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -16958,7 +18949,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svld1_gather_[u64]offset[_s64](svbool_t pg, const int64_t *base, svuint64_t offsets)
+ LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -16986,7 +18980,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svld1_gather_[s32]offset[_f32](svbool_t pg, const float32_t *base, svint32_t offsets)
+ LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+
To be added.
To be added.
@@ -17014,7 +19011,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svld1_gather_[u32]offset[_f32](svbool_t pg, const float32_t *base, svuint32_t offsets)
+ LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+
To be added.
To be added.
@@ -17042,7 +19042,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1_gather_[s32]offset[_u32](svbool_t pg, const uint32_t *base, svint32_t offsets)
+ LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+
To be added.
To be added.
@@ -17070,7 +19073,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1_gather_[u32]offset[_u32](svbool_t pg, const uint32_t *base, svuint32_t offsets)
+ LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+
To be added.
To be added.
@@ -17098,7 +19104,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1_gather_[s64]offset[_u64](svbool_t pg, const uint64_t *base, svint64_t offsets)
+ LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -17126,7 +19135,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1_gather_[u64]offset[_u64](svbool_t pg, const uint64_t *base, svuint64_t offsets)
+ LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+
To be added.
To be added.
@@ -17153,7 +19165,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+ CNTP Xresult, Pg, Pop.B
+
To be added.
To be added.
@@ -17180,7 +19195,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+ CNTP Xresult, Pg, Pop.B
+
To be added.
To be added.
@@ -17207,7 +19225,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+ CNTP Xresult, Pg, Pop.B
+
To be added.
To be added.
@@ -17234,7 +19255,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+ CNTP Xresult, Pg, Pop.B
+
To be added.
To be added.
@@ -17261,7 +19285,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+ CNTP Xresult, Pg, Pop.B
+
To be added.
To be added.
@@ -17288,7 +19315,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+ CNTP Xresult, Pg, Pop.B
+
To be added.
To be added.
@@ -17315,7 +19345,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+ CNTP Xresult, Pg, Pop.B
+
To be added.
To be added.
@@ -17342,7 +19375,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svcntp_b16(svbool_t pg, svbool_t op)
+ CNTP Xresult, Pg, Pop.H
+
To be added.
To be added.
@@ -17369,7 +19405,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svcntp_b32(svbool_t pg, svbool_t op)
+ CNTP Xresult, Pg, Pop.S
+
To be added.
To be added.
@@ -17396,7 +19435,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svcntp_b64(svbool_t pg, svbool_t op)
+ CNTP Xresult, Pg, Pop.D
+
To be added.
To be added.
@@ -17423,7 +19465,11 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svinsr[_n_u8](svuint8_t op1, uint8_t op2)
+ INSR Ztied1.B, Wop2
+ INSR Ztied1.B, Bop2
+
To be added.
To be added.
@@ -17450,7 +19496,11 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svinsr[_n_f64](svfloat64_t op1, float64_t op2)
+ INSR Ztied1.D, Xop2
+ INSR Ztied1.D, Dop2
+
To be added.
To be added.
@@ -17477,7 +19527,11 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svinsr[_n_s16](svint16_t op1, int16_t op2)
+ INSR Ztied1.H, Wop2
+ INSR Ztied1.H, Hop2
+
To be added.
To be added.
@@ -17504,7 +19558,11 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svinsr[_n_s32](svint32_t op1, int32_t op2)
+ INSR Ztied1.S, Wop2
+ INSR Ztied1.S, Sop2
+
To be added.
To be added.
@@ -17531,7 +19589,11 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svinsr[_n_s64](svint64_t op1, int64_t op2)
+ INSR Ztied1.D, Xop2
+ INSR Ztied1.D, Dop2
+
To be added.
To be added.
@@ -17558,7 +19620,11 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svinsr[_n_s8](svint8_t op1, int8_t op2)
+ INSR Ztied1.B, Wop2
+ INSR Ztied1.B, Bop2
+
To be added.
To be added.
@@ -17585,7 +19651,11 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svinsr[_n_f32](svfloat32_t op1, float32_t op2)
+ INSR Ztied1.S, Wop2
+ INSR Ztied1.S, Sop2
+
To be added.
To be added.
@@ -17612,7 +19682,11 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svinsr[_n_u16](svuint16_t op1, uint16_t op2)
+ INSR Ztied1.H, Wop2
+ INSR Ztied1.H, Hop2
+
To be added.
To be added.
@@ -17639,7 +19713,11 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svinsr[_n_u32](svuint32_t op1, uint32_t op2)
+ INSR Ztied1.S, Wop2
+ INSR Ztied1.S, Sop2
+
To be added.
To be added.
@@ -17666,7 +19744,11 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svinsr[_n_u64](svuint64_t op1, uint64_t op2)
+ INSR Ztied1.D, Xop2
+ INSR Ztied1.D, Dop2
+
To be added.
To be added.
@@ -17687,9 +19769,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
@@ -17712,7 +19795,12 @@
To be added.
- To be added.
+
+ svuint16_t svcls[_s16]_m(svuint16_t inactive, svbool_t pg, svint16_t op)
+ svuint16_t svcls[_s16]_x(svbool_t pg, svint16_t op)
+ svuint16_t svcls[_s16]_z(svbool_t pg, svint16_t op)
+ CLS Zresult.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -17737,7 +19825,12 @@
To be added.
- To be added.
+
+ svuint32_t svcls[_s32]_m(svuint32_t inactive, svbool_t pg, svint32_t op)
+ svuint32_t svcls[_s32]_x(svbool_t pg, svint32_t op)
+ svuint32_t svcls[_s32]_z(svbool_t pg, svint32_t op)
+ CLS Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -17762,7 +19855,12 @@
To be added.
- To be added.
+
+ svuint64_t svcls[_s64]_m(svuint64_t inactive, svbool_t pg, svint64_t op)
+ svuint64_t svcls[_s64]_x(svbool_t pg, svint64_t op)
+ svuint64_t svcls[_s64]_z(svbool_t pg, svint64_t op)
+ CLS Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -17787,7 +19885,12 @@
To be added.
- To be added.
+
+ svuint8_t svcls[_s8]_m(svuint8_t inactive, svbool_t pg, svint8_t op)
+ svuint8_t svcls[_s8]_x(svbool_t pg, svint8_t op)
+ svuint8_t svcls[_s8]_z(svbool_t pg, svint8_t op)
+ CLS Zresult.B, Pg/M, Zop.B
+
To be added.
To be added.
@@ -17812,7 +19915,12 @@
To be added.
- To be added.
+
+ svuint8_t svclz[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op)
+ svuint8_t svclz[_u8]_x(svbool_t pg, svuint8_t op)
+ svuint8_t svclz[_u8]_z(svbool_t pg, svuint8_t op)
+ CLZ Zresult.B, Pg/M, Zop.B
+
To be added.
To be added.
@@ -17837,7 +19945,12 @@
To be added.
- To be added.
+
+ svuint16_t svclz[_s16]_m(svuint16_t inactive, svbool_t pg, svint16_t op)
+ svuint16_t svclz[_s16]_x(svbool_t pg, svint16_t op)
+ svuint16_t svclz[_s16]_z(svbool_t pg, svint16_t op)
+ CLZ Zresult.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -17862,7 +19975,12 @@
To be added.
- To be added.
+
+ svuint32_t svclz[_s32]_m(svuint32_t inactive, svbool_t pg, svint32_t op)
+ svuint32_t svclz[_s32]_x(svbool_t pg, svint32_t op)
+ svuint32_t svclz[_s32]_z(svbool_t pg, svint32_t op)
+ CLZ Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -17887,7 +20005,12 @@
To be added.
- To be added.
+
+ svuint64_t svclz[_s64]_m(svuint64_t inactive, svbool_t pg, svint64_t op)
+ svuint64_t svclz[_s64]_x(svbool_t pg, svint64_t op)
+ svuint64_t svclz[_s64]_z(svbool_t pg, svint64_t op)
+ CLZ Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -17912,7 +20035,12 @@
To be added.
- To be added.
+
+ svuint8_t svclz[_s8]_m(svuint8_t inactive, svbool_t pg, svint8_t op)
+ svuint8_t svclz[_s8]_x(svbool_t pg, svint8_t op)
+ svuint8_t svclz[_s8]_z(svbool_t pg, svint8_t op)
+ CLZ Zresult.B, Pg/M, Zop.B
+
To be added.
To be added.
@@ -17937,7 +20065,12 @@
To be added.
- To be added.
+
+ svuint16_t svclz[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op)
+ svuint16_t svclz[_u16]_x(svbool_t pg, svuint16_t op)
+ svuint16_t svclz[_u16]_z(svbool_t pg, svuint16_t op)
+ CLZ Zresult.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -17962,7 +20095,12 @@
To be added.
- To be added.
+
+ svuint32_t svclz[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ svuint32_t svclz[_u32]_x(svbool_t pg, svuint32_t op)
+ svuint32_t svclz[_u32]_z(svbool_t pg, svuint32_t op)
+ CLZ Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -17987,7 +20125,12 @@
To be added.
- To be added.
+
+ svuint64_t svclz[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ svuint64_t svclz[_u64]_x(svbool_t pg, svuint64_t op)
+ svuint64_t svclz[_u64]_z(svbool_t pg, svuint64_t op)
+ CLZ Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -18013,7 +20156,10 @@
To be added.
To be added.
- To be added.
+
+ svuint8x2_t svld2[_u8](svbool_t pg, const uint8_t *base)
+ LD2B {Zresult0.B, Zresult1.B}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18039,7 +20185,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64x2_t svld2[_f64](svbool_t pg, const float64_t *base)
+ LD2D {Zresult0.D, Zresult1.D}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18065,7 +20214,10 @@
To be added.
To be added.
- To be added.
+
+ svint16x2_t svld2[_s16](svbool_t pg, const int16_t *base)
+ LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18091,7 +20243,10 @@
To be added.
To be added.
- To be added.
+
+ svint32x2_t svld2[_s32](svbool_t pg, const int32_t *base)
+ LD2W {Zresult0.S, Zresult1.S}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18117,7 +20272,10 @@
To be added.
To be added.
- To be added.
+
+ svint64x2_t svld2[_s64](svbool_t pg, const int64_t *base)
+ LD2D {Zresult0.D, Zresult1.D}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18143,7 +20301,10 @@
To be added.
To be added.
- To be added.
+
+ svint8x2_t svld2[_s8](svbool_t pg, const int8_t *base)
+ LD2B {Zresult0.B, Zresult1.B}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18169,7 +20330,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32x2_t svld2[_f32](svbool_t pg, const float32_t *base)
+ LD2W {Zresult0.S, Zresult1.S}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18195,7 +20359,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16x2_t svld2[_u16](svbool_t pg, const uint16_t *base)
+ LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18221,7 +20388,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32x2_t svld2[_u32](svbool_t pg, const uint32_t *base)
+ LD2W {Zresult0.S, Zresult1.S}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18247,7 +20417,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64x2_t svld2[_u64](svbool_t pg, const uint64_t *base)
+ LD2D {Zresult0.D, Zresult1.D}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18273,7 +20446,10 @@
To be added.
To be added.
- To be added.
+
+ svuint8x3_t svld3[_u8](svbool_t pg, const uint8_t *base)
+ LD3B {Zresult0.B - Zresult2.B}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18299,7 +20475,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64x3_t svld3[_f64](svbool_t pg, const float64_t *base)
+ LD3D {Zresult0.D - Zresult2.D}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18325,7 +20504,10 @@
To be added.
To be added.
- To be added.
+
+ svint16x3_t svld3[_s16](svbool_t pg, const int16_t *base)
+ LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18351,7 +20533,10 @@
To be added.
To be added.
- To be added.
+
+ svint32x3_t svld3[_s32](svbool_t pg, const int32_t *base)
+ LD3W {Zresult0.S - Zresult2.S}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18377,7 +20562,10 @@
To be added.
To be added.
- To be added.
+
+ svint64x3_t svld3[_s64](svbool_t pg, const int64_t *base)
+ LD3D {Zresult0.D - Zresult2.D}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18403,7 +20591,10 @@
To be added.
To be added.
- To be added.
+
+ svint8x3_t svld3[_s8](svbool_t pg, const int8_t *base)
+ LD3B {Zresult0.B - Zresult2.B}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18429,7 +20620,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32x3_t svld3[_f32](svbool_t pg, const float32_t *base)
+ LD3W {Zresult0.S - Zresult2.S}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18455,7 +20649,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16x3_t svld3[_u16](svbool_t pg, const uint16_t *base)
+ LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18481,7 +20678,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32x3_t svld3[_u32](svbool_t pg, const uint32_t *base)
+ LD3W {Zresult0.S - Zresult2.S}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18507,7 +20707,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64x3_t svld3[_u64](svbool_t pg, const uint64_t *base)
+ LD3D {Zresult0.D - Zresult2.D}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18533,7 +20736,10 @@
To be added.
To be added.
- To be added.
+
+ svuint8x4_t svld4[_u8](svbool_t pg, const uint8_t *base)
+ LD4B {Zresult0.B - Zresult3.B}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18559,7 +20765,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64x4_t svld4[_f64](svbool_t pg, const float64_t *base)
+ LD4D {Zresult0.D - Zresult3.D}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18585,7 +20794,10 @@
To be added.
To be added.
- To be added.
+
+ svint16x4_t svld4[_s16](svbool_t pg, const int16_t *base)
+ LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18611,7 +20823,10 @@
To be added.
To be added.
- To be added.
+
+ svint32x4_t svld4[_s32](svbool_t pg, const int32_t *base)
+ LD4W {Zresult0.S - Zresult3.S}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18637,7 +20852,10 @@
To be added.
To be added.
- To be added.
+
+ svint64x4_t svld4[_s64](svbool_t pg, const int64_t *base)
+ LD4D {Zresult0.D - Zresult3.D}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18663,7 +20881,10 @@
To be added.
To be added.
- To be added.
+
+ svint8x4_t svld4[_s8](svbool_t pg, const int8_t *base)
+ LD4B {Zresult0.B - Zresult3.B}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18689,7 +20910,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32x4_t svld4[_f32](svbool_t pg, const float32_t *base)
+ LD4W {Zresult0.S - Zresult3.S}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18715,7 +20939,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16x4_t svld4[_u16](svbool_t pg, const uint16_t *base)
+ LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18741,7 +20968,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32x4_t svld4[_u32](svbool_t pg, const uint32_t *base)
+ LD4W {Zresult0.S - Zresult3.S}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18767,7 +20997,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64x4_t svld4[_u64](svbool_t pg, const uint64_t *base)
+ LD4D {Zresult0.D - Zresult3.D}, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18793,7 +21026,11 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svld1[_u8](svbool_t pg, const uint8_t *base)
+ LD1B Zresult.B, Pg/Z, [Xarray, Xindex]
+ LD1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18819,7 +21056,11 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svld1[_f64](svbool_t pg, const float64_t *base)
+ LD1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]
+ LD1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18845,7 +21086,11 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svld1[_s16](svbool_t pg, const int16_t *base)
+ LD1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]
+ LD1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18871,7 +21116,11 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svld1[_s32](svbool_t pg, const int32_t *base)
+ LD1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]
+ LD1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18897,7 +21146,11 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svld1[_s64](svbool_t pg, const int64_t *base)
+ LD1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]
+ LD1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18923,7 +21176,11 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svld1[_s8](svbool_t pg, const int8_t *base)
+ LD1B Zresult.B, Pg/Z, [Xarray, Xindex]
+ LD1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18949,7 +21206,11 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svld1[_f32](svbool_t pg, const float32_t *base)
+ LD1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]
+ LD1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -18975,7 +21236,11 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svld1[_u16](svbool_t pg, const uint16_t *base)
+ LD1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]
+ LD1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19001,7 +21266,11 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1[_u32](svbool_t pg, const uint32_t *base)
+ LD1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]
+ LD1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19027,7 +21296,11 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1[_u64](svbool_t pg, const uint64_t *base)
+ LD1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]
+ LD1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19053,7 +21326,10 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svld1rq[_u8](svbool_t pg, const uint8_t *base)
+ LD1RQB Zresult.B, Pg/Z, [Xbase, #0]
+
To be added.
To be added.
@@ -19079,7 +21355,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svld1rq[_f64](svbool_t pg, const float64_t *base)
+ LD1RQD Zresult.D, Pg/Z, [Xbase, #0]
+
To be added.
To be added.
@@ -19105,7 +21384,10 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svld1rq[_s16](svbool_t pg, const int16_t *base)
+ LD1RQH Zresult.H, Pg/Z, [Xbase, #0]
+
To be added.
To be added.
@@ -19131,7 +21413,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svld1rq[_s32](svbool_t pg, const int32_t *base)
+ LD1RQW Zresult.S, Pg/Z, [Xbase, #0]
+
To be added.
To be added.
@@ -19157,7 +21442,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svld1rq[_s64](svbool_t pg, const int64_t *base)
+ LD1RQD Zresult.D, Pg/Z, [Xbase, #0]
+
To be added.
To be added.
@@ -19183,7 +21471,10 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svld1rq[_s8](svbool_t pg, const int8_t *base)
+ LD1RQB Zresult.B, Pg/Z, [Xbase, #0]
+
To be added.
To be added.
@@ -19209,7 +21500,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svld1rq[_f32](svbool_t pg, const float32_t *base)
+ LD1RQW Zresult.S, Pg/Z, [Xbase, #0]
+
To be added.
To be added.
@@ -19235,7 +21529,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svld1rq[_u16](svbool_t pg, const uint16_t *base)
+ LD1RQH Zresult.H, Pg/Z, [Xbase, #0]
+
To be added.
To be added.
@@ -19261,7 +21558,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1rq[_u32](svbool_t pg, const uint32_t *base)
+ LD1RQW Zresult.S, Pg/Z, [Xbase, #0]
+
To be added.
To be added.
@@ -19287,7 +21587,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1rq[_u64](svbool_t pg, const uint64_t *base)
+ LD1RQD Zresult.D, Pg/Z, [Xbase, #0]
+
To be added.
To be added.
@@ -19311,7 +21614,10 @@
To be added.
- To be added.
+
+ svint16_t svldnf1ub_s16(svbool_t pg, const uint8_t *base)
+ LDNF1B Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19335,7 +21641,10 @@
To be added.
- To be added.
+
+ svint32_t svldnf1ub_s32(svbool_t pg, const uint8_t *base)
+ LDNF1B Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19359,7 +21668,10 @@
To be added.
- To be added.
+
+ svint64_t svldnf1ub_s64(svbool_t pg, const uint8_t *base)
+ LDNF1B Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19383,7 +21695,10 @@
To be added.
- To be added.
+
+ svuint16_t svldnf1ub_u16(svbool_t pg, const uint8_t *base)
+ LDNF1B Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19407,7 +21722,10 @@
To be added.
- To be added.
+
+ svuint32_t svldnf1ub_u32(svbool_t pg, const uint8_t *base)
+ LDNF1B Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19431,7 +21749,10 @@
To be added.
- To be added.
+
+ svuint64_t svldnf1ub_u64(svbool_t pg, const uint8_t *base)
+ LDNF1B Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19457,7 +21778,10 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svld1ub_s16(svbool_t pg, const uint8_t *base)
+ LD1B Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19483,7 +21807,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svld1ub_s32(svbool_t pg, const uint8_t *base)
+ LD1B Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19509,7 +21836,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svld1ub_s64(svbool_t pg, const uint8_t *base)
+ LD1B Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19535,7 +21865,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svld1ub_u16(svbool_t pg, const uint8_t *base)
+ LD1B Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19561,7 +21894,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1ub_u32(svbool_t pg, const uint8_t *base)
+ LD1B Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19587,7 +21923,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1ub_u64(svbool_t pg, const uint8_t *base)
+ LD1B Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19611,7 +21950,10 @@
To be added.
- To be added.
+
+ svint32_t svldnf1sh_s32(svbool_t pg, const int16_t *base)
+ LDNF1SH Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19635,7 +21977,10 @@
To be added.
- To be added.
+
+ svint64_t svldnf1sh_s64(svbool_t pg, const int16_t *base)
+ LDNF1SH Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19659,7 +22004,10 @@
To be added.
- To be added.
+
+ svuint32_t svldnf1sh_u32(svbool_t pg, const int16_t *base)
+ LDNF1SH Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19683,7 +22031,10 @@
To be added.
- To be added.
+
+ svuint64_t svldnf1sh_u64(svbool_t pg, const int16_t *base)
+ LDNF1SH Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19709,7 +22060,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svld1sh_s32(svbool_t pg, const int16_t *base)
+ LD1SH Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19735,7 +22089,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svld1sh_s64(svbool_t pg, const int16_t *base)
+ LD1SH Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19761,7 +22118,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1sh_u32(svbool_t pg, const int16_t *base)
+ LD1SH Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19787,7 +22147,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1sh_u64(svbool_t pg, const int16_t *base)
+ LD1SH Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19811,7 +22174,10 @@
To be added.
- To be added.
+
+ svint64_t svldnf1sw_s64(svbool_t pg, const int32_t *base)
+ LDNF1SW Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19835,7 +22201,10 @@
To be added.
- To be added.
+
+ svuint64_t svldnf1sw_u64(svbool_t pg, const int32_t *base)
+ LDNF1SW Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19861,7 +22230,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svld1sw_s64(svbool_t pg, const int32_t *base)
+ LD1SW Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19887,7 +22259,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1sw_u64(svbool_t pg, const int32_t *base)
+ LD1SW Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19911,7 +22286,10 @@
To be added.
- To be added.
+
+ svuint8_t svldnf1[_u8](svbool_t pg, const uint8_t *base)
+ LDNF1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19935,7 +22313,10 @@
To be added.
- To be added.
+
+ svfloat64_t svldnf1[_f64](svbool_t pg, const float64_t *base)
+ LDNF1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19959,7 +22340,10 @@
To be added.
- To be added.
+
+ svint16_t svldnf1[_s16](svbool_t pg, const int16_t *base)
+ LDNF1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -19983,7 +22367,10 @@
To be added.
- To be added.
+
+ svint32_t svldnf1[_s32](svbool_t pg, const int32_t *base)
+ LDNF1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20007,7 +22394,10 @@
To be added.
- To be added.
+
+ svint64_t svldnf1[_s64](svbool_t pg, const int64_t *base)
+ LDNF1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20031,7 +22421,10 @@
To be added.
- To be added.
+
+ svint8_t svldnf1[_s8](svbool_t pg, const int8_t *base)
+ LDNF1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20055,7 +22448,10 @@
To be added.
- To be added.
+
+ svfloat32_t svldnf1[_f32](svbool_t pg, const float32_t *base)
+ LDNF1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20079,7 +22475,10 @@
To be added.
- To be added.
+
+ svuint16_t svldnf1[_u16](svbool_t pg, const uint16_t *base)
+ LDNF1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20103,7 +22502,10 @@
To be added.
- To be added.
+
+ svuint32_t svldnf1[_u32](svbool_t pg, const uint32_t *base)
+ LDNF1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20127,7 +22529,10 @@
To be added.
- To be added.
+
+ svuint64_t svldnf1[_u64](svbool_t pg, const uint64_t *base)
+ LDNF1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20153,7 +22558,10 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svldnt1[_u8](svbool_t pg, const uint8_t *base)
+ LDNT1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20179,7 +22587,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svldnt1[_f64](svbool_t pg, const float64_t *base)
+ LDNT1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20205,7 +22616,10 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svldnt1[_s16](svbool_t pg, const int16_t *base)
+ LDNT1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20231,7 +22645,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svldnt1[_s32](svbool_t pg, const int32_t *base)
+ LDNT1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20257,7 +22674,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svldnt1[_s64](svbool_t pg, const int64_t *base)
+ LDNT1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20283,7 +22703,10 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svldnt1[_s8](svbool_t pg, const int8_t *base)
+ LDNT1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20309,7 +22732,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svldnt1[_f32](svbool_t pg, const float32_t *base)
+ LDNT1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20335,7 +22761,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svldnt1[_u16](svbool_t pg, const uint16_t *base)
+ LDNT1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20361,7 +22790,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svldnt1[_u32](svbool_t pg, const uint32_t *base)
+ LDNT1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20387,7 +22819,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svldnt1[_u64](svbool_t pg, const uint64_t *base)
+ LDNT1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20411,7 +22846,10 @@
To be added.
- To be added.
+
+ svint16_t svldnf1sb_s16(svbool_t pg, const int8_t *base)
+ LDNF1SB Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20435,7 +22873,10 @@
To be added.
- To be added.
+
+ svint32_t svldnf1sb_s32(svbool_t pg, const int8_t *base)
+ LDNF1SB Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20459,7 +22900,10 @@
To be added.
- To be added.
+
+ svint64_t svldnf1sb_s64(svbool_t pg, const int8_t *base)
+ LDNF1SB Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20483,7 +22927,10 @@
To be added.
- To be added.
+
+ svuint16_t svldnf1sb_u16(svbool_t pg, const int8_t *base)
+ LDNF1SB Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20507,7 +22954,10 @@
To be added.
- To be added.
+
+ svuint32_t svldnf1sb_u32(svbool_t pg, const int8_t *base)
+ LDNF1SB Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20531,7 +22981,10 @@
To be added.
- To be added.
+
+ svuint64_t svldnf1sb_u64(svbool_t pg, const int8_t *base)
+ LDNF1SB Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20557,7 +23010,10 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svld1sb_s16(svbool_t pg, const int8_t *base)
+ LD1SB Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20583,7 +23039,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svld1sb_s32(svbool_t pg, const int8_t *base)
+ LD1SB Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20609,7 +23068,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svld1sb_s64(svbool_t pg, const int8_t *base)
+ LD1SB Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20635,7 +23097,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svld1sb_u16(svbool_t pg, const int8_t *base)
+ LD1SB Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20661,7 +23126,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1sb_u32(svbool_t pg, const int8_t *base)
+ LD1SB Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20687,7 +23155,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1sb_u64(svbool_t pg, const int8_t *base)
+ LD1SB Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20711,7 +23182,10 @@
To be added.
- To be added.
+
+ svint32_t svldnf1uh_s32(svbool_t pg, const uint16_t *base)
+ LDNF1H Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20735,7 +23209,10 @@
To be added.
- To be added.
+
+ svint64_t svldnf1uh_s64(svbool_t pg, const uint16_t *base)
+ LDNF1H Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20759,7 +23236,10 @@
To be added.
- To be added.
+
+ svuint32_t svldnf1uh_u32(svbool_t pg, const uint16_t *base)
+ LDNF1H Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20783,7 +23263,10 @@
To be added.
- To be added.
+
+ svuint64_t svldnf1uh_u64(svbool_t pg, const uint16_t *base)
+ LDNF1H Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20809,7 +23292,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svld1uh_s32(svbool_t pg, const uint16_t *base)
+ LD1H Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20835,7 +23321,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svld1uh_s64(svbool_t pg, const uint16_t *base)
+ LD1H Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20861,7 +23350,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svld1uh_u32(svbool_t pg, const uint16_t *base)
+ LD1H Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20887,7 +23379,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1uh_u64(svbool_t pg, const uint16_t *base)
+ LD1H Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20911,7 +23406,10 @@
To be added.
- To be added.
+
+ svint64_t svldnf1uw_s64(svbool_t pg, const uint32_t *base)
+ LDNF1W Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20935,7 +23433,10 @@
To be added.
- To be added.
+
+ svuint64_t svldnf1uw_u64(svbool_t pg, const uint32_t *base)
+ LDNF1W Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20961,7 +23462,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svld1uw_s64(svbool_t pg, const uint32_t *base)
+ LD1W Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -20987,7 +23491,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svld1uw_u64(svbool_t pg, const uint32_t *base)
+ LD1W Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+
To be added.
To be added.
@@ -21014,7 +23521,13 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svmax[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svmax[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svmax[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ UMAX Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ UMAX Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+
To be added.
To be added.
@@ -21041,7 +23554,13 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svmax[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svmax[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svmax[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FMAX Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ FMAX Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+
To be added.
To be added.
@@ -21068,7 +23587,13 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svmax[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svmax[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svmax[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ SMAX Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ SMAX Ztied2.H, Pg/M, Ztied2.H, Zop1.H
+
To be added.
To be added.
@@ -21095,7 +23620,13 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svmax[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svmax[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svmax[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ SMAX Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ SMAX Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+
To be added.
To be added.
@@ -21122,7 +23653,13 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svmax[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svmax[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svmax[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ SMAX Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ SMAX Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+
To be added.
To be added.
@@ -21149,7 +23686,13 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svmax[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svmax[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svmax[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ SMAX Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ SMAX Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+
To be added.
To be added.
@@ -21176,7 +23719,13 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svmax[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svmax[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svmax[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FMAX Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ FMAX Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+
To be added.
To be added.
@@ -21203,7 +23752,13 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svmax[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svmax[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svmax[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ UMAX Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ UMAX Ztied2.H, Pg/M, Ztied2.H, Zop1.H
+
To be added.
To be added.
@@ -21230,7 +23785,13 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svmax[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svmax[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svmax[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ UMAX Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ UMAX Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+
To be added.
To be added.
@@ -21257,7 +23818,13 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svmax[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svmax[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svmax[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ UMAX Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ UMAX Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+
To be added.
To be added.
@@ -21282,7 +23849,10 @@
To be added.
- To be added.
+
+ uint8_t svmaxv[_u8](svbool_t pg, svuint8_t op)
+ UMAXV Bresult, Pg, Zop.B
+
To be added.
To be added.
@@ -21307,7 +23877,10 @@
To be added.
- To be added.
+
+ float64_t svmaxv[_f64](svbool_t pg, svfloat64_t op)
+ FMAXV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -21332,7 +23905,10 @@
To be added.
- To be added.
+
+ int16_t svmaxv[_s16](svbool_t pg, svint16_t op)
+ SMAXV Hresult, Pg, Zop.H
+
To be added.
To be added.
@@ -21357,7 +23933,10 @@
To be added.
- To be added.
+
+ int32_t svmaxv[_s32](svbool_t pg, svint32_t op)
+ SMAXV Sresult, Pg, Zop.S
+
To be added.
To be added.
@@ -21382,7 +23961,10 @@
To be added.
- To be added.
+
+ int64_t svmaxv[_s64](svbool_t pg, svint64_t op)
+ SMAXV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -21407,7 +23989,10 @@
To be added.
- To be added.
+
+ int8_t svmaxv[_s8](svbool_t pg, svint8_t op)
+ SMAXV Bresult, Pg, Zop.B
+
To be added.
To be added.
@@ -21432,7 +24017,10 @@
To be added.
- To be added.
+
+ float32_t svmaxv[_f32](svbool_t pg, svfloat32_t op)
+ FMAXV Sresult, Pg, Zop.S
+
To be added.
To be added.
@@ -21457,7 +24045,10 @@
To be added.
- To be added.
+
+ uint16_t svmaxv[_u16](svbool_t pg, svuint16_t op)
+ UMAXV Hresult, Pg, Zop.H
+
To be added.
To be added.
@@ -21482,7 +24073,10 @@
To be added.
- To be added.
+
+ uint32_t svmaxv[_u32](svbool_t pg, svuint32_t op)
+ UMAXV Sresult, Pg, Zop.S
+
To be added.
To be added.
@@ -21507,7 +24101,10 @@
To be added.
- To be added.
+
+ uint64_t svmaxv[_u64](svbool_t pg, svuint64_t op)
+ UMAXV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -21534,7 +24131,13 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svmaxnm[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svmaxnm[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svmaxnm[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FMAXNM Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ FMAXNM Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+
To be added.
To be added.
@@ -21561,7 +24164,13 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svmaxnm[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svmaxnm[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svmaxnm[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FMAXNM Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ FMAXNM Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+
To be added.
To be added.
@@ -21586,7 +24195,10 @@
To be added.
- To be added.
+
+ float64_t svmaxnmv[_f64](svbool_t pg, svfloat64_t op)
+ FMAXNMV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -21611,7 +24223,10 @@
To be added.
- To be added.
+
+ float32_t svmaxnmv[_f32](svbool_t pg, svfloat32_t op)
+ FMAXNMV Sresult, Pg, Zop.S
+
To be added.
To be added.
@@ -21638,7 +24253,13 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svmin[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svmin[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svmin[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ UMIN Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ UMIN Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+
To be added.
To be added.
@@ -21665,7 +24286,13 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svmin[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svmin[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svmin[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FMIN Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ FMIN Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+
To be added.
To be added.
@@ -21692,7 +24319,13 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svmin[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svmin[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svmin[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ SMIN Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ SMIN Ztied2.H, Pg/M, Ztied2.H, Zop1.H
+
To be added.
To be added.
@@ -21719,7 +24352,13 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svmin[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svmin[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svmin[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ SMIN Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ SMIN Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+
To be added.
To be added.
@@ -21746,7 +24385,13 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svmin[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svmin[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svmin[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ SMIN Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ SMIN Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+
To be added.
To be added.
@@ -21773,7 +24418,13 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svmin[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svmin[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svmin[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ SMIN Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ SMIN Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+
To be added.
To be added.
@@ -21800,7 +24451,13 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svmin[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svmin[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svmin[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FMIN Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ FMIN Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+
To be added.
To be added.
@@ -21827,7 +24484,13 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svmin[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svmin[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svmin[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ UMIN Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ UMIN Ztied2.H, Pg/M, Ztied2.H, Zop1.H
+
To be added.
To be added.
@@ -21854,7 +24517,13 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svmin[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svmin[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svmin[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ UMIN Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ UMIN Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+
To be added.
To be added.
@@ -21881,7 +24550,13 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svmin[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svmin[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svmin[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ UMIN Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ UMIN Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+
To be added.
To be added.
@@ -21906,7 +24581,10 @@
To be added.
- To be added.
+
+ uint8_t svminv[_u8](svbool_t pg, svuint8_t op)
+ UMINV Bresult, Pg, Zop.B
+
To be added.
To be added.
@@ -21931,7 +24609,10 @@
To be added.
- To be added.
+
+ float64_t svminv[_f64](svbool_t pg, svfloat64_t op)
+ FMINV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -21956,7 +24637,10 @@
To be added.
- To be added.
+
+ int16_t svminv[_s16](svbool_t pg, svint16_t op)
+ SMINV Hresult, Pg, Zop.H
+
To be added.
To be added.
@@ -21981,7 +24665,10 @@
To be added.
- To be added.
+
+ int32_t svminv[_s32](svbool_t pg, svint32_t op)
+ SMINV Sresult, Pg, Zop.S
+
To be added.
To be added.
@@ -22006,7 +24693,10 @@
To be added.
- To be added.
+
+ int64_t svminv[_s64](svbool_t pg, svint64_t op)
+ SMINV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -22031,7 +24721,10 @@
To be added.
- To be added.
+
+ int8_t svminv[_s8](svbool_t pg, svint8_t op)
+ SMINV Bresult, Pg, Zop.B
+
To be added.
To be added.
@@ -22056,7 +24749,10 @@
To be added.
- To be added.
+
+ float32_t svminv[_f32](svbool_t pg, svfloat32_t op)
+ FMINV Sresult, Pg, Zop.S
+
To be added.
To be added.
@@ -22081,7 +24777,10 @@
To be added.
- To be added.
+
+ uint16_t svminv[_u16](svbool_t pg, svuint16_t op)
+ UMINV Hresult, Pg, Zop.H
+
To be added.
To be added.
@@ -22106,7 +24805,10 @@
To be added.
- To be added.
+
+ uint32_t svminv[_u32](svbool_t pg, svuint32_t op)
+ UMINV Sresult, Pg, Zop.S
+
To be added.
To be added.
@@ -22131,7 +24833,10 @@
To be added.
- To be added.
+
+ uint64_t svminv[_u64](svbool_t pg, svuint64_t op)
+ UMINV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -22158,7 +24863,13 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svminnm[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svminnm[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svminnm[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FMINNM Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ FMINNM Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+
To be added.
To be added.
@@ -22185,7 +24896,13 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svminnm[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svminnm[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svminnm[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FMINNM Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ FMINNM Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+
To be added.
To be added.
@@ -22210,7 +24927,10 @@
To be added.
- To be added.
+
+ float64_t svminnmv[_f64](svbool_t pg, svfloat64_t op)
+ FMINNMV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -22235,7 +24955,10 @@
To be added.
- To be added.
+
+ float32_t svminnmv[_f32](svbool_t pg, svfloat32_t op)
+ FMINNMV Sresult, Pg, Zop.S
+
To be added.
To be added.
@@ -22262,7 +24985,13 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svmul[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svmul[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svmul[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ MUL Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ MUL Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+
To be added.
To be added.
@@ -22289,7 +25019,13 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svmul[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svmul[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svmul[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FMUL Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ FMUL Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+
To be added.
To be added.
@@ -22316,7 +25053,13 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svmul[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svmul[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svmul[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ MUL Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ MUL Ztied2.H, Pg/M, Ztied2.H, Zop1.H
+
To be added.
To be added.
@@ -22343,7 +25086,13 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svmul[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svmul[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svmul[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ MUL Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ MUL Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+
To be added.
To be added.
@@ -22370,7 +25119,13 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svmul[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svmul[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svmul[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ MUL Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ MUL Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+
To be added.
To be added.
@@ -22397,7 +25152,13 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svmul[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svmul[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svmul[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ MUL Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ MUL Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+
To be added.
To be added.
@@ -22424,7 +25185,13 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svmul[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svmul[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svmul[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FMUL Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ FMUL Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+
To be added.
To be added.
@@ -22451,7 +25218,13 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svmul[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svmul[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svmul[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ MUL Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ MUL Ztied2.H, Pg/M, Ztied2.H, Zop1.H
+
To be added.
To be added.
@@ -22478,7 +25251,13 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svmul[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svmul[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svmul[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ MUL Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ MUL Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+
To be added.
To be added.
@@ -22505,7 +25284,13 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svmul[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svmul[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svmul[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ MUL Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ MUL Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+
To be added.
To be added.
@@ -22534,7 +25319,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint8_t svmla[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3)
+ svuint8_t svmla[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3)
+ svuint8_t svmla[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3)
+ MLA Ztied1.B, Pg/M, Zop2.B, Zop3.B
+
To be added.
To be added.
@@ -22563,7 +25353,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint16_t svmla[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ svint16_t svmla[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ svint16_t svmla[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ MLA Ztied1.H, Pg/M, Zop2.H, Zop3.H
+
To be added.
To be added.
@@ -22592,7 +25387,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svmla[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3)
+ svint32_t svmla[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3)
+ svint32_t svmla[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3)
+ MLA Ztied1.S, Pg/M, Zop2.S, Zop3.S
+
To be added.
To be added.
@@ -22621,7 +25421,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svmla[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3)
+ svint64_t svmla[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3)
+ svint64_t svmla[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3)
+ MLA Ztied1.D, Pg/M, Zop2.D, Zop3.D
+
To be added.
To be added.
@@ -22650,7 +25455,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint8_t svmla[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ svint8_t svmla[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ svint8_t svmla[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ MLA Ztied1.B, Pg/M, Zop2.B, Zop3.B
+
To be added.
To be added.
@@ -22679,7 +25489,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint16_t svmla[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3)
+ svuint16_t svmla[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3)
+ svuint16_t svmla[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3)
+ MLA Ztied1.H, Pg/M, Zop2.H, Zop3.H
+
To be added.
To be added.
@@ -22708,7 +25523,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svmla[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3)
+ svuint32_t svmla[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3)
+ svuint32_t svmla[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3)
+ MLA Ztied1.S, Pg/M, Zop2.S, Zop3.S
+
To be added.
To be added.
@@ -22737,7 +25557,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svmla[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3)
+ svuint64_t svmla[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3)
+ svuint64_t svmla[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3)
+ MLA Ztied1.D, Pg/M, Zop2.D, Zop3.D
+
To be added.
To be added.
@@ -22775,7 +25600,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svcmla[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_rotation)
+ svfloat64_t svcmla[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_rotation)
+ svfloat64_t svcmla[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_rotation)
+ FCMLA Ztied1.D, Pg/M, Zop2.D, Zop3.D, #imm_rotation
+
To be added.
To be added.
@@ -22813,7 +25643,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svcmla[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_rotation)
+ svfloat32_t svcmla[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_rotation)
+ svfloat32_t svcmla[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_rotation)
+ FCMLA Ztied1.S, Pg/M, Zop2.S, Zop3.S, #imm_rotation
+
To be added.
To be added.
@@ -22860,7 +25695,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svcmla_lane[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_index, uint64_t imm_rotation)
+ FCMLA Ztied1.S, Zop2.S, Zop3.S[imm_index], #imm_rotation
+
To be added.
To be added.
@@ -22896,7 +25734,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svmul_lane[_f64](svfloat64_t op1, svfloat64_t op2, uint64_t imm_index)
+ FMUL Zresult.D, Zop1.D, Zop2.D[imm_index]
+
To be added.
To be added.
@@ -22932,7 +25773,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svmul_lane[_f32](svfloat32_t op1, svfloat32_t op2, uint64_t imm_index)
+ FMUL Zresult.S, Zop1.S, Zop2.S[imm_index]
+
To be added.
To be added.
@@ -22959,7 +25803,12 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svmulx[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svmulx[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svmulx[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FMULX Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -22986,7 +25835,12 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svmulx[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svmulx[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svmulx[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FMULX Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -23015,7 +25869,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint8_t svmls[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3)
+ svuint8_t svmls[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3)
+ svuint8_t svmls[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3)
+ MLS Ztied1.B, Pg/M, Zop2.B, Zop3.B
+
To be added.
To be added.
@@ -23044,7 +25903,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint16_t svmls[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ svint16_t svmls[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ svint16_t svmls[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ MLS Ztied1.H, Pg/M, Zop2.H, Zop3.H
+
To be added.
To be added.
@@ -23073,7 +25937,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svmls[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3)
+ svint32_t svmls[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3)
+ svint32_t svmls[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3)
+ MLS Ztied1.S, Pg/M, Zop2.S, Zop3.S
+
To be added.
To be added.
@@ -23102,7 +25971,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svmls[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3)
+ svint64_t svmls[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3)
+ svint64_t svmls[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3)
+ MLS Ztied1.D, Pg/M, Zop2.D, Zop3.D
+
To be added.
To be added.
@@ -23131,7 +26005,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint8_t svmls[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ svint8_t svmls[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ svint8_t svmls[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ MLS Ztied1.B, Pg/M, Zop2.B, Zop3.B
+
To be added.
To be added.
@@ -23160,7 +26039,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint16_t svmls[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3)
+ svuint16_t svmls[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3)
+ svuint16_t svmls[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3)
+ MLS Ztied1.H, Pg/M, Zop2.H, Zop3.H
+
To be added.
To be added.
@@ -23189,7 +26073,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svmls[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3)
+ svuint32_t svmls[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3)
+ svuint32_t svmls[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3)
+ MLS Ztied1.S, Pg/M, Zop2.S, Zop3.S
+
To be added.
To be added.
@@ -23218,7 +26107,12 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svmls[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3)
+ svuint64_t svmls[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3)
+ svuint64_t svmls[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3)
+ MLS Ztied1.D, Pg/M, Zop2.D, Zop3.D
+
To be added.
To be added.
@@ -23243,7 +26137,12 @@
To be added.
- To be added.
+
+ svfloat64_t svneg[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ svfloat64_t svneg[_f64]_x(svbool_t pg, svfloat64_t op)
+ svfloat64_t svneg[_f64]_z(svbool_t pg, svfloat64_t op)
+ FNEG Ztied.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -23268,7 +26167,12 @@
To be added.
- To be added.
+
+ svint16_t svneg[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+ svint16_t svneg[_s16]_x(svbool_t pg, svint16_t op)
+ svint16_t svneg[_s16]_z(svbool_t pg, svint16_t op)
+ NEG Ztied.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -23293,7 +26197,12 @@
To be added.
- To be added.
+
+ svint32_t svneg[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ svint32_t svneg[_s32]_x(svbool_t pg, svint32_t op)
+ svint32_t svneg[_s32]_z(svbool_t pg, svint32_t op)
+ NEG Ztied.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -23318,7 +26227,12 @@
To be added.
- To be added.
+
+ svint64_t svneg[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ svint64_t svneg[_s64]_x(svbool_t pg, svint64_t op)
+ svint64_t svneg[_s64]_z(svbool_t pg, svint64_t op)
+ NEG Ztied.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -23343,7 +26257,12 @@
To be added.
- To be added.
+
+ svint8_t svneg[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op)
+ svint8_t svneg[_s8]_x(svbool_t pg, svint8_t op)
+ svint8_t svneg[_s8]_z(svbool_t pg, svint8_t op)
+ NEG Ztied.B, Pg/M, Zop.B
+
To be added.
To be added.
@@ -23368,7 +26287,12 @@
To be added.
- To be added.
+
+ svfloat32_t svneg[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ svfloat32_t svneg[_f32]_x(svbool_t pg, svfloat32_t op)
+ svfloat32_t svneg[_f32]_z(svbool_t pg, svfloat32_t op)
+ FNEG Ztied.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -23393,7 +26317,13 @@
To be added.
- To be added.
+
+ svuint8_t svnot[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op)
+ svuint8_t svnot[_u8]_x(svbool_t pg, svuint8_t op)
+ svuint8_t svnot[_u8]_z(svbool_t pg, svuint8_t op)
+ svbool_t svnot[_b]_z(svbool_t pg, svbool_t op)
+ NOT Zresult.B, Pg/M, Zop.B
+
To be added.
To be added.
@@ -23418,7 +26348,13 @@
To be added.
- To be added.
+
+ svint16_t svnot[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+ svint16_t svnot[_s16]_x(svbool_t pg, svint16_t op)
+ svint16_t svnot[_s16]_z(svbool_t pg, svint16_t op)
+ svbool_t svnot[_b]_z(svbool_t pg, svbool_t op)
+ NOT Zresult.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -23443,7 +26379,13 @@
To be added.
- To be added.
+
+ svint32_t svnot[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ svint32_t svnot[_s32]_x(svbool_t pg, svint32_t op)
+ svint32_t svnot[_s32]_z(svbool_t pg, svint32_t op)
+ svbool_t svnot[_b]_z(svbool_t pg, svbool_t op)
+ NOT Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -23468,7 +26410,13 @@
To be added.
- To be added.
+
+ svint64_t svnot[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ svint64_t svnot[_s64]_x(svbool_t pg, svint64_t op)
+ svint64_t svnot[_s64]_z(svbool_t pg, svint64_t op)
+ svbool_t svnot[_b]_z(svbool_t pg, svbool_t op)
+ NOT Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -23493,7 +26441,13 @@
To be added.
- To be added.
+
+ svint8_t svnot[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op)
+ svint8_t svnot[_s8]_x(svbool_t pg, svint8_t op)
+ svint8_t svnot[_s8]_z(svbool_t pg, svint8_t op)
+ svbool_t svnot[_b]_z(svbool_t pg, svbool_t op)
+ NOT Zresult.B, Pg/M, Zop.B
+
To be added.
To be added.
@@ -23518,7 +26472,13 @@
To be added.
- To be added.
+
+ svuint16_t svnot[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op)
+ svuint16_t svnot[_u16]_x(svbool_t pg, svuint16_t op)
+ svuint16_t svnot[_u16]_z(svbool_t pg, svuint16_t op)
+ svbool_t svnot[_b]_z(svbool_t pg, svbool_t op)
+ NOT Zresult.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -23543,7 +26503,13 @@
To be added.
- To be added.
+
+ svuint32_t svnot[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ svuint32_t svnot[_u32]_x(svbool_t pg, svuint32_t op)
+ svuint32_t svnot[_u32]_z(svbool_t pg, svuint32_t op)
+ svbool_t svnot[_b]_z(svbool_t pg, svbool_t op)
+ NOT Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -23568,7 +26534,13 @@
To be added.
- To be added.
+
+ svuint64_t svnot[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ svuint64_t svnot[_u64]_x(svbool_t pg, svuint64_t op)
+ svuint64_t svnot[_u64]_z(svbool_t pg, svuint64_t op)
+ svbool_t svnot[_b]_z(svbool_t pg, svbool_t op)
+ NOT Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -23595,7 +26567,12 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svorr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svorr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svorr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ ORR Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -23622,7 +26599,12 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svorr[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svorr[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svorr[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ ORR Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -23649,7 +26631,12 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svorr[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svorr[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svorr[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ ORR Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -23676,7 +26663,12 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svorr[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svorr[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svorr[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ ORR Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -23703,7 +26695,12 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svorr[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svorr[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svorr[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ ORR Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -23730,7 +26727,12 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svorr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svorr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svorr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ ORR Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -23757,7 +26759,12 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svorr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svorr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svorr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ ORR Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -23784,7 +26791,12 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svorr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svorr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svorr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ ORR Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -23809,7 +26821,10 @@
To be added.
- To be added.
+
+ uint8_t svorv[_u8](svbool_t pg, svuint8_t op)
+ ORV Bresult, Pg, Zop.B
+
To be added.
To be added.
@@ -23834,7 +26849,10 @@
To be added.
- To be added.
+
+ int16_t svorv[_s16](svbool_t pg, svint16_t op)
+ ORV Hresult, Pg, Zop.H
+
To be added.
To be added.
@@ -23859,7 +26877,10 @@
To be added.
- To be added.
+
+ int32_t svorv[_s32](svbool_t pg, svint32_t op)
+ ORV Sresult, Pg, Zop.S
+
To be added.
To be added.
@@ -23884,7 +26905,10 @@
To be added.
- To be added.
+
+ int64_t svorv[_s64](svbool_t pg, svint64_t op)
+ ORV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -23909,7 +26933,10 @@
To be added.
- To be added.
+
+ int8_t svorv[_s8](svbool_t pg, svint8_t op)
+ ORV Bresult, Pg, Zop.B
+
To be added.
To be added.
@@ -23934,7 +26961,10 @@
To be added.
- To be added.
+
+ uint16_t svorv[_u16](svbool_t pg, svuint16_t op)
+ ORV Hresult, Pg, Zop.H
+
To be added.
To be added.
@@ -23959,7 +26989,10 @@
To be added.
- To be added.
+
+ uint32_t svorv[_u32](svbool_t pg, svuint32_t op)
+ ORV Sresult, Pg, Zop.S
+
To be added.
To be added.
@@ -23984,7 +27017,10 @@
To be added.
- To be added.
+
+ uint64_t svorv[_u64](svbool_t pg, svuint64_t op)
+ ORV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -24009,7 +27045,12 @@
To be added.
- To be added.
+
+ svuint8_t svcnt[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op)
+ svuint8_t svcnt[_u8]_x(svbool_t pg, svuint8_t op)
+ svuint8_t svcnt[_u8]_z(svbool_t pg, svuint8_t op)
+ CNT Zresult.B, Pg/M, Zop.B
+
To be added.
To be added.
@@ -24034,7 +27075,12 @@
To be added.
- To be added.
+
+ svuint64_t svcnt[_f64]_m(svuint64_t inactive, svbool_t pg, svfloat64_t op)
+ svuint64_t svcnt[_f64]_x(svbool_t pg, svfloat64_t op)
+ svuint64_t svcnt[_f64]_z(svbool_t pg, svfloat64_t op)
+ CNT Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -24059,7 +27105,12 @@
To be added.
- To be added.
+
+ svuint16_t svcnt[_s16]_m(svuint16_t inactive, svbool_t pg, svint16_t op)
+ svuint16_t svcnt[_s16]_x(svbool_t pg, svint16_t op)
+ svuint16_t svcnt[_s16]_z(svbool_t pg, svint16_t op)
+ CNT Zresult.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -24084,7 +27135,12 @@
To be added.
- To be added.
+
+ svuint32_t svcnt[_s32]_m(svuint32_t inactive, svbool_t pg, svint32_t op)
+ svuint32_t svcnt[_s32]_x(svbool_t pg, svint32_t op)
+ svuint32_t svcnt[_s32]_z(svbool_t pg, svint32_t op)
+ CNT Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -24109,7 +27165,12 @@
To be added.
- To be added.
+
+ svuint64_t svcnt[_s64]_m(svuint64_t inactive, svbool_t pg, svint64_t op)
+ svuint64_t svcnt[_s64]_x(svbool_t pg, svint64_t op)
+ svuint64_t svcnt[_s64]_z(svbool_t pg, svint64_t op)
+ CNT Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -24134,7 +27195,12 @@
To be added.
- To be added.
+
+ svuint8_t svcnt[_s8]_m(svuint8_t inactive, svbool_t pg, svint8_t op)
+ svuint8_t svcnt[_s8]_x(svbool_t pg, svint8_t op)
+ svuint8_t svcnt[_s8]_z(svbool_t pg, svint8_t op)
+ CNT Zresult.B, Pg/M, Zop.B
+
To be added.
To be added.
@@ -24159,7 +27225,12 @@
To be added.
- To be added.
+
+ svuint32_t svcnt[_f32]_m(svuint32_t inactive, svbool_t pg, svfloat32_t op)
+ svuint32_t svcnt[_f32]_x(svbool_t pg, svfloat32_t op)
+ svuint32_t svcnt[_f32]_z(svbool_t pg, svfloat32_t op)
+ CNT Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -24184,7 +27255,12 @@
To be added.
- To be added.
+
+ svuint16_t svcnt[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op)
+ svuint16_t svcnt[_u16]_x(svbool_t pg, svuint16_t op)
+ svuint16_t svcnt[_u16]_z(svbool_t pg, svuint16_t op)
+ CNT Zresult.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -24209,7 +27285,12 @@
To be added.
- To be added.
+
+ svuint32_t svcnt[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ svuint32_t svcnt[_u32]_x(svbool_t pg, svuint32_t op)
+ svuint32_t svcnt[_u32]_z(svbool_t pg, svuint32_t op)
+ CNT Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -24234,7 +27315,12 @@
To be added.
- To be added.
+
+ svuint64_t svcnt[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ svuint64_t svcnt[_u64]_x(svbool_t pg, svuint64_t op)
+ svuint64_t svcnt[_u64]_z(svbool_t pg, svuint64_t op)
+ CNT Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -24269,7 +27355,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfb(svbool_t pg, const void *base, enum svprfop op)
+ PRFB op, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -24303,7 +27392,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfh(svbool_t pg, const void *base, enum svprfop op)
+ PRFH op, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -24337,7 +27429,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfw(svbool_t pg, const void *base, enum svprfop op)
+ PRFW op, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -24371,7 +27466,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svprfd(svbool_t pg, const void *base, enum svprfop op)
+ PRFD op, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -24395,7 +27493,10 @@
To be added.
- To be added.
+
+ svfloat64_t svrecpe[_f64](svfloat64_t op)
+ FRECPE Zresult.D, Zop.D
+
To be added.
To be added.
@@ -24420,7 +27521,10 @@
To be added.
- To be added.
+
+ svfloat32_t svrecpe[_f32](svfloat32_t op)
+ FRECPE Zresult.S, Zop.S
+
To be added.
To be added.
@@ -24445,7 +27549,12 @@
To be added.
- To be added.
+
+ svfloat64_t svrecpx[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ svfloat64_t svrecpx[_f64]_x(svbool_t pg, svfloat64_t op)
+ svfloat64_t svrecpx[_f64]_z(svbool_t pg, svfloat64_t op)
+ FRECPX Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -24470,7 +27579,12 @@
To be added.
- To be added.
+
+ svfloat32_t svrecpx[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ svfloat32_t svrecpx[_f32]_x(svbool_t pg, svfloat32_t op)
+ svfloat32_t svrecpx[_f32]_z(svbool_t pg, svfloat32_t op)
+ FRECPX Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -24495,7 +27609,10 @@
To be added.
- To be added.
+
+ svfloat64_t svrsqrte[_f64](svfloat64_t op)
+ FRSQRTE Zresult.D, Zop.D
+
To be added.
To be added.
@@ -24520,7 +27637,10 @@
To be added.
- To be added.
+
+ svfloat32_t svrsqrte[_f32](svfloat32_t op)
+ FRSQRTE Zresult.S, Zop.S
+
To be added.
To be added.
@@ -24547,7 +27667,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svrsqrts[_f64](svfloat64_t op1, svfloat64_t op2)
+ FRSQRTS Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -24574,7 +27697,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svrsqrts[_f32](svfloat32_t op1, svfloat32_t op2)
+ FRSQRTS Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -24601,7 +27727,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svrecps[_f64](svfloat64_t op1, svfloat64_t op2)
+ FRECPS Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -24628,7 +27757,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svrecps[_f32](svfloat32_t op1, svfloat32_t op2)
+ FRECPS Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -24653,7 +27785,12 @@
To be added.
- To be added.
+
+ svuint8_t svrbit[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op)
+ svuint8_t svrbit[_u8]_x(svbool_t pg, svuint8_t op)
+ svuint8_t svrbit[_u8]_z(svbool_t pg, svuint8_t op)
+ RBIT Zresult.B, Pg/M, Zop.B
+
To be added.
To be added.
@@ -24678,7 +27815,12 @@
To be added.
- To be added.
+
+ svint16_t svrbit[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+ svint16_t svrbit[_s16]_x(svbool_t pg, svint16_t op)
+ svint16_t svrbit[_s16]_z(svbool_t pg, svint16_t op)
+ RBIT Zresult.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -24703,7 +27845,12 @@
To be added.
- To be added.
+
+ svint32_t svrbit[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ svint32_t svrbit[_s32]_x(svbool_t pg, svint32_t op)
+ svint32_t svrbit[_s32]_z(svbool_t pg, svint32_t op)
+ RBIT Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -24728,7 +27875,12 @@
To be added.
- To be added.
+
+ svint64_t svrbit[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ svint64_t svrbit[_s64]_x(svbool_t pg, svint64_t op)
+ svint64_t svrbit[_s64]_z(svbool_t pg, svint64_t op)
+ RBIT Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -24753,7 +27905,12 @@
To be added.
- To be added.
+
+ svint8_t svrbit[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op)
+ svint8_t svrbit[_s8]_x(svbool_t pg, svint8_t op)
+ svint8_t svrbit[_s8]_z(svbool_t pg, svint8_t op)
+ RBIT Zresult.B, Pg/M, Zop.B
+
To be added.
To be added.
@@ -24778,7 +27935,12 @@
To be added.
- To be added.
+
+ svuint16_t svrbit[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op)
+ svuint16_t svrbit[_u16]_x(svbool_t pg, svuint16_t op)
+ svuint16_t svrbit[_u16]_z(svbool_t pg, svuint16_t op)
+ RBIT Zresult.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -24803,7 +27965,12 @@
To be added.
- To be added.
+
+ svuint32_t svrbit[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ svuint32_t svrbit[_u32]_x(svbool_t pg, svuint32_t op)
+ svuint32_t svrbit[_u32]_z(svbool_t pg, svuint32_t op)
+ RBIT Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -24828,7 +27995,12 @@
To be added.
- To be added.
+
+ svuint64_t svrbit[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ svuint64_t svrbit[_u64]_x(svbool_t pg, svuint64_t op)
+ svuint64_t svrbit[_u64]_z(svbool_t pg, svuint64_t op)
+ RBIT Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -24853,7 +28025,10 @@
To be added.
- To be added.
+
+ svuint8_t svrev[_u8](svuint8_t op)
+ REV Zresult.B, Zop.B
+
To be added.
To be added.
@@ -24878,7 +28053,10 @@
To be added.
- To be added.
+
+ svfloat64_t svrev[_f64](svfloat64_t op)
+ REV Zresult.D, Zop.D
+
To be added.
To be added.
@@ -24903,7 +28081,10 @@
To be added.
- To be added.
+
+ svint16_t svrev[_s16](svint16_t op)
+ REV Zresult.H, Zop.H
+
To be added.
To be added.
@@ -24928,7 +28109,10 @@
To be added.
- To be added.
+
+ svint32_t svrev[_s32](svint32_t op)
+ REV Zresult.S, Zop.S
+
To be added.
To be added.
@@ -24953,7 +28137,10 @@
To be added.
- To be added.
+
+ svint64_t svrev[_s64](svint64_t op)
+ REV Zresult.D, Zop.D
+
To be added.
To be added.
@@ -24978,7 +28165,10 @@
To be added.
- To be added.
+
+ svint8_t svrev[_s8](svint8_t op)
+ REV Zresult.B, Zop.B
+
To be added.
To be added.
@@ -25003,7 +28193,10 @@
To be added.
- To be added.
+
+ svfloat32_t svrev[_f32](svfloat32_t op)
+ REV Zresult.S, Zop.S
+
To be added.
To be added.
@@ -25028,7 +28221,10 @@
To be added.
- To be added.
+
+ svuint16_t svrev[_u16](svuint16_t op)
+ REV Zresult.H, Zop.H
+
To be added.
To be added.
@@ -25053,7 +28249,10 @@
To be added.
- To be added.
+
+ svuint32_t svrev[_u32](svuint32_t op)
+ REV Zresult.S, Zop.S
+
To be added.
To be added.
@@ -25078,7 +28277,10 @@
To be added.
- To be added.
+
+ svuint64_t svrev[_u64](svuint64_t op)
+ REV Zresult.D, Zop.D
+
To be added.
To be added.
@@ -25103,7 +28305,12 @@
To be added.
- To be added.
+
+ svint32_t svrevh[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ svint32_t svrevh[_s32]_x(svbool_t pg, svint32_t op)
+ svint32_t svrevh[_s32]_z(svbool_t pg, svint32_t op)
+ REVH Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -25128,7 +28335,12 @@
To be added.
- To be added.
+
+ svint64_t svrevh[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ svint64_t svrevh[_s64]_x(svbool_t pg, svint64_t op)
+ svint64_t svrevh[_s64]_z(svbool_t pg, svint64_t op)
+ REVH Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -25153,7 +28365,12 @@
To be added.
- To be added.
+
+ svuint32_t svrevh[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ svuint32_t svrevh[_u32]_x(svbool_t pg, svuint32_t op)
+ svuint32_t svrevh[_u32]_z(svbool_t pg, svuint32_t op)
+ REVH Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -25178,7 +28395,12 @@
To be added.
- To be added.
+
+ svuint64_t svrevh[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ svuint64_t svrevh[_u64]_x(svbool_t pg, svuint64_t op)
+ svuint64_t svrevh[_u64]_z(svbool_t pg, svuint64_t op)
+ REVH Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -25203,7 +28425,12 @@
To be added.
- To be added.
+
+ svint64_t svrevw[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ svint64_t svrevw[_s64]_x(svbool_t pg, svint64_t op)
+ svint64_t svrevw[_s64]_z(svbool_t pg, svint64_t op)
+ REVW Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -25228,7 +28455,12 @@
To be added.
- To be added.
+
+ svuint64_t svrevw[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ svuint64_t svrevw[_u64]_x(svbool_t pg, svuint64_t op)
+ svuint64_t svrevw[_u64]_z(svbool_t pg, svuint64_t op)
+ REVW Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -25253,7 +28485,12 @@
To be added.
- To be added.
+
+ svint16_t svrevb[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+ svint16_t svrevb[_s16]_x(svbool_t pg, svint16_t op)
+ svint16_t svrevb[_s16]_z(svbool_t pg, svint16_t op)
+ REVB Zresult.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -25278,7 +28515,12 @@
To be added.
- To be added.
+
+ svint32_t svrevb[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ svint32_t svrevb[_s32]_x(svbool_t pg, svint32_t op)
+ svint32_t svrevb[_s32]_z(svbool_t pg, svint32_t op)
+ REVB Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -25303,7 +28545,12 @@
To be added.
- To be added.
+
+ svint64_t svrevb[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ svint64_t svrevb[_s64]_x(svbool_t pg, svint64_t op)
+ svint64_t svrevb[_s64]_z(svbool_t pg, svint64_t op)
+ REVB Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -25328,7 +28575,12 @@
To be added.
- To be added.
+
+ svuint16_t svrevb[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op)
+ svuint16_t svrevb[_u16]_x(svbool_t pg, svuint16_t op)
+ svuint16_t svrevb[_u16]_z(svbool_t pg, svuint16_t op)
+ REVB Zresult.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -25353,7 +28605,12 @@
To be added.
- To be added.
+
+ svuint32_t svrevb[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ svuint32_t svrevb[_u32]_x(svbool_t pg, svuint32_t op)
+ svuint32_t svrevb[_u32]_z(svbool_t pg, svuint32_t op)
+ REVB Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -25378,7 +28635,12 @@
To be added.
- To be added.
+
+ svuint64_t svrevb[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ svuint64_t svrevb[_u64]_x(svbool_t pg, svuint64_t op)
+ svuint64_t svrevb[_u64]_z(svbool_t pg, svuint64_t op)
+ REVB Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -25403,7 +28665,12 @@
To be added.
- To be added.
+
+ svfloat64_t svrinta[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ svfloat64_t svrinta[_f64]_x(svbool_t pg, svfloat64_t op)
+ svfloat64_t svrinta[_f64]_z(svbool_t pg, svfloat64_t op)
+ FRINTA Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -25428,7 +28695,12 @@
To be added.
- To be added.
+
+ svfloat32_t svrinta[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ svfloat32_t svrinta[_f32]_x(svbool_t pg, svfloat32_t op)
+ svfloat32_t svrinta[_f32]_z(svbool_t pg, svfloat32_t op)
+ FRINTA Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -25453,7 +28725,12 @@
To be added.
- To be added.
+
+ svfloat64_t svrintn[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ svfloat64_t svrintn[_f64]_x(svbool_t pg, svfloat64_t op)
+ svfloat64_t svrintn[_f64]_z(svbool_t pg, svfloat64_t op)
+ FRINTN Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -25478,7 +28755,12 @@
To be added.
- To be added.
+
+ svfloat32_t svrintn[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ svfloat32_t svrintn[_f32]_x(svbool_t pg, svfloat32_t op)
+ svfloat32_t svrintn[_f32]_z(svbool_t pg, svfloat32_t op)
+ FRINTN Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -25503,7 +28785,12 @@
To be added.
- To be added.
+
+ svfloat64_t svrintm[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ svfloat64_t svrintm[_f64]_x(svbool_t pg, svfloat64_t op)
+ svfloat64_t svrintm[_f64]_z(svbool_t pg, svfloat64_t op)
+ FRINTM Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -25528,7 +28815,12 @@
To be added.
- To be added.
+
+ svfloat32_t svrintm[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ svfloat32_t svrintm[_f32]_x(svbool_t pg, svfloat32_t op)
+ svfloat32_t svrintm[_f32]_z(svbool_t pg, svfloat32_t op)
+ FRINTM Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -25553,7 +28845,12 @@
To be added.
- To be added.
+
+ svfloat64_t svrintp[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ svfloat64_t svrintp[_f64]_x(svbool_t pg, svfloat64_t op)
+ svfloat64_t svrintp[_f64]_z(svbool_t pg, svfloat64_t op)
+ FRINTP Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -25578,7 +28875,12 @@
To be added.
- To be added.
+
+ svfloat32_t svrintp[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ svfloat32_t svrintp[_f32]_x(svbool_t pg, svfloat32_t op)
+ svfloat32_t svrintp[_f32]_z(svbool_t pg, svfloat32_t op)
+ FRINTP Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -25603,7 +28905,12 @@
To be added.
- To be added.
+
+ svfloat64_t svrintz[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ svfloat64_t svrintz[_f64]_x(svbool_t pg, svfloat64_t op)
+ svfloat64_t svrintz[_f64]_z(svbool_t pg, svfloat64_t op)
+ FRINTZ Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -25628,7 +28935,12 @@
To be added.
- To be added.
+
+ svfloat32_t svrintz[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ svfloat32_t svrintz[_f32]_x(svbool_t pg, svfloat32_t op)
+ svfloat32_t svrintz[_f32]_z(svbool_t pg, svfloat32_t op)
+ FRINTZ Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -25670,7 +28982,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int32_t svqdech_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQDECH Xtied, Wtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -25712,7 +29027,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int64_t svqdech_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQDECH Xtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -25754,7 +29072,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint16_t svqdech_pat[_s16](svint16_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQDECH Ztied.H, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -25796,7 +29117,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint16_t svqdech_pat[_u16](svuint16_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQDECH Ztied.H, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -25838,7 +29162,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint32_t svqdech_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQDECH Wtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -25880,7 +29207,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint64_t svqdech_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQDECH Xtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -25922,7 +29252,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int32_t svqdecw_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQDECW Xtied, Wtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -25964,7 +29297,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int64_t svqdecw_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQDECW Xtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -26006,7 +29342,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svqdecw_pat[_s32](svint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQDECW Ztied.S, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -26048,7 +29387,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svqdecw_pat[_u32](svuint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQDECW Ztied.S, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -26090,7 +29432,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint32_t svqdecw_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQDECW Wtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -26132,7 +29477,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint64_t svqdecw_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQDECW Xtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -26174,7 +29522,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int32_t svqdecd_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQDECD Xtied, Wtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -26216,7 +29567,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int64_t svqdecd_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQDECD Xtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -26258,7 +29612,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svqdecd_pat[_s64](svint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQDECD Ztied.D, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -26300,7 +29657,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svqdecd_pat[_u64](svuint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQDECD Ztied.D, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -26342,7 +29702,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint32_t svqdecd_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQDECD Wtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -26384,7 +29747,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint64_t svqdecd_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQDECD Xtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -26426,7 +29792,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int32_t svqdecb_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQDECB Xtied, Wtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -26468,7 +29837,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int64_t svqdecb_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQDECB Xtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -26510,7 +29882,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint32_t svqdecb_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQDECB Wtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -26552,7 +29927,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint64_t svqdecb_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQDECB Xtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -26579,7 +29957,10 @@
To be added.
To be added.
- To be added.
+
+ int32_t svqdecp[_n_s32]_b8(int32_t op, svbool_t pg)
+ SQDECP Xtied, Pg.B, Wtied
+
To be added.
To be added.
@@ -26606,7 +29987,10 @@
To be added.
To be added.
- To be added.
+
+ int32_t svqdecp[_n_s32]_b16(int32_t op, svbool_t pg)
+ SQDECP Xtied, Pg.H, Wtied
+
To be added.
To be added.
@@ -26633,7 +30017,10 @@
To be added.
To be added.
- To be added.
+
+ int32_t svqdecp[_n_s32]_b32(int32_t op, svbool_t pg)
+ SQDECP Xtied, Pg.S, Wtied
+
To be added.
To be added.
@@ -26660,7 +30047,10 @@
To be added.
To be added.
- To be added.
+
+ int32_t svqdecp[_n_s32]_b64(int32_t op, svbool_t pg)
+ SQDECP Xtied, Pg.D, Wtied
+
To be added.
To be added.
@@ -26687,7 +30077,10 @@
To be added.
To be added.
- To be added.
+
+ int64_t svqdecp[_n_s64]_b8(int64_t op, svbool_t pg)
+ SQDECP Xtied, Pg.B
+
To be added.
To be added.
@@ -26714,7 +30107,10 @@
To be added.
To be added.
- To be added.
+
+ int64_t svqdecp[_n_s64]_b16(int64_t op, svbool_t pg)
+ SQDECP Xtied, Pg.H
+
To be added.
To be added.
@@ -26741,7 +30137,10 @@
To be added.
To be added.
- To be added.
+
+ int64_t svqdecp[_n_s64]_b32(int64_t op, svbool_t pg)
+ SQDECP Xtied, Pg.S
+
To be added.
To be added.
@@ -26768,7 +30167,10 @@
To be added.
To be added.
- To be added.
+
+ int64_t svqdecp[_n_s64]_b64(int64_t op, svbool_t pg)
+ SQDECP Xtied, Pg.D
+
To be added.
To be added.
@@ -26795,7 +30197,10 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svqdecp[_s16](svint16_t op, svbool_t pg)
+ SQDECP Ztied.H, Pg
+
To be added.
To be added.
@@ -26822,7 +30227,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svqdecp[_s32](svint32_t op, svbool_t pg)
+ SQDECP Ztied.S, Pg
+
To be added.
To be added.
@@ -26849,7 +30257,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svqdecp[_s64](svint64_t op, svbool_t pg)
+ SQDECP Ztied.D, Pg
+
To be added.
To be added.
@@ -26876,7 +30287,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svqdecp[_u16](svuint16_t op, svbool_t pg)
+ UQDECP Ztied.H, Pg
+
To be added.
To be added.
@@ -26903,7 +30317,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svqdecp[_u32](svuint32_t op, svbool_t pg)
+ UQDECP Ztied.S, Pg
+
To be added.
To be added.
@@ -26930,7 +30347,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svqdecp[_u64](svuint64_t op, svbool_t pg)
+ UQDECP Ztied.D, Pg
+
To be added.
To be added.
@@ -26957,7 +30377,10 @@
To be added.
To be added.
- To be added.
+
+ uint32_t svqdecp[_n_u32]_b8(uint32_t op, svbool_t pg)
+ UQDECP Wtied, Pg.B
+
To be added.
To be added.
@@ -26984,7 +30407,10 @@
To be added.
To be added.
- To be added.
+
+ uint32_t svqdecp[_n_u32]_b16(uint32_t op, svbool_t pg)
+ UQDECP Wtied, Pg.H
+
To be added.
To be added.
@@ -27011,7 +30437,10 @@
To be added.
To be added.
- To be added.
+
+ uint32_t svqdecp[_n_u32]_b32(uint32_t op, svbool_t pg)
+ UQDECP Wtied, Pg.S
+
To be added.
To be added.
@@ -27038,7 +30467,10 @@
To be added.
To be added.
- To be added.
+
+ uint32_t svqdecp[_n_u32]_b64(uint32_t op, svbool_t pg)
+ UQDECP Wtied, Pg.D
+
To be added.
To be added.
@@ -27065,7 +30497,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svqdecp[_n_u64]_b8(uint64_t op, svbool_t pg)
+ UQDECP Xtied, Pg.B
+
To be added.
To be added.
@@ -27092,7 +30527,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svqdecp[_n_u64]_b16(uint64_t op, svbool_t pg)
+ UQDECP Xtied, Pg.H
+
To be added.
To be added.
@@ -27119,7 +30557,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svqdecp[_n_u64]_b32(uint64_t op, svbool_t pg)
+ UQDECP Xtied, Pg.S
+
To be added.
To be added.
@@ -27146,7 +30587,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svqdecp[_n_u64]_b64(uint64_t op, svbool_t pg)
+ UQDECP Xtied, Pg.D
+
To be added.
To be added.
@@ -27188,7 +30632,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int32_t svqinch_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQINCH Xtied, Wtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27230,7 +30677,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int64_t svqinch_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQINCH Xtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27272,7 +30722,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint16_t svqinch_pat[_s16](svint16_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQINCH Ztied.H, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27314,7 +30767,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint16_t svqinch_pat[_u16](svuint16_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQINCH Ztied.H, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27356,7 +30812,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint32_t svqinch_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQINCH Wtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27398,7 +30857,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint64_t svqinch_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQINCH Xtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27440,7 +30902,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int32_t svqincw_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQINCW Xtied, Wtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27482,7 +30947,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int64_t svqincw_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQINCW Xtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27524,7 +30992,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svqincw_pat[_s32](svint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQINCW Ztied.S, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27566,7 +31037,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svqincw_pat[_u32](svuint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQINCW Ztied.S, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27608,7 +31082,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint32_t svqincw_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQINCW Wtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27650,7 +31127,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint64_t svqincw_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQINCW Xtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27692,7 +31172,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int32_t svqincd_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQINCD Xtied, Wtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27734,7 +31217,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int64_t svqincd_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQINCD Xtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27776,7 +31262,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svqincd_pat[_s64](svint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQINCD Ztied.D, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27818,7 +31307,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svqincd_pat[_u64](svuint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQINCD Ztied.D, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27860,7 +31352,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint32_t svqincd_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQINCD Wtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27902,7 +31397,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint64_t svqincd_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQINCD Xtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27944,7 +31442,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int32_t svqincb_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQINCB Xtied, Wtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -27986,7 +31487,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ int64_t svqincb_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor)
+ SQINCB Xtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -28028,7 +31532,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint32_t svqincb_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQINCB Wtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -28070,7 +31577,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ uint64_t svqincb_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ UQINCB Xtied, pattern, MUL #imm_factor
+
To be added.
To be added.
@@ -28097,7 +31607,10 @@
To be added.
To be added.
- To be added.
+
+ int32_t svqincp[_n_s32]_b8(int32_t op, svbool_t pg)
+ SQINCP Xtied, Pg.B, Wtied
+
To be added.
To be added.
@@ -28124,7 +31637,10 @@
To be added.
To be added.
- To be added.
+
+ int32_t svqincp[_n_s32]_b16(int32_t op, svbool_t pg)
+ SQINCP Xtied, Pg.H, Wtied
+
To be added.
To be added.
@@ -28151,7 +31667,10 @@
To be added.
To be added.
- To be added.
+
+ int32_t svqincp[_n_s32]_b32(int32_t op, svbool_t pg)
+ SQINCP Xtied, Pg.S, Wtied
+
To be added.
To be added.
@@ -28178,7 +31697,10 @@
To be added.
To be added.
- To be added.
+
+ int32_t svqincp[_n_s32]_b64(int32_t op, svbool_t pg)
+ SQINCP Xtied, Pg.D, Wtied
+
To be added.
To be added.
@@ -28205,7 +31727,10 @@
To be added.
To be added.
- To be added.
+
+ int64_t svqincp[_n_s64]_b8(int64_t op, svbool_t pg)
+ SQINCP Xtied, Pg.B
+
To be added.
To be added.
@@ -28232,7 +31757,10 @@
To be added.
To be added.
- To be added.
+
+ int64_t svqincp[_n_s64]_b16(int64_t op, svbool_t pg)
+ SQINCP Xtied, Pg.H
+
To be added.
To be added.
@@ -28259,7 +31787,10 @@
To be added.
To be added.
- To be added.
+
+ int64_t svqincp[_n_s64]_b32(int64_t op, svbool_t pg)
+ SQINCP Xtied, Pg.S
+
To be added.
To be added.
@@ -28286,7 +31817,10 @@
To be added.
To be added.
- To be added.
+
+ int64_t svqincp[_n_s64]_b64(int64_t op, svbool_t pg)
+ SQINCP Xtied, Pg.D
+
To be added.
To be added.
@@ -28313,7 +31847,10 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svqincp[_s16](svint16_t op, svbool_t pg)
+ SQINCP Ztied.H, Pg
+
To be added.
To be added.
@@ -28340,7 +31877,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svqincp[_s32](svint32_t op, svbool_t pg)
+ SQINCP Ztied.S, Pg
+
To be added.
To be added.
@@ -28367,7 +31907,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svqincp[_s64](svint64_t op, svbool_t pg)
+ SQINCP Ztied.D, Pg
+
To be added.
To be added.
@@ -28394,7 +31937,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svqincp[_u16](svuint16_t op, svbool_t pg)
+ UQINCP Ztied.H, Pg
+
To be added.
To be added.
@@ -28421,7 +31967,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svqincp[_u32](svuint32_t op, svbool_t pg)
+ UQINCP Ztied.S, Pg
+
To be added.
To be added.
@@ -28448,7 +31997,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svqincp[_u64](svuint64_t op, svbool_t pg)
+ UQINCP Ztied.D, Pg
+
To be added.
To be added.
@@ -28475,7 +32027,10 @@
To be added.
To be added.
- To be added.
+
+ uint32_t svqincp[_n_u32]_b8(uint32_t op, svbool_t pg)
+ UQINCP Wtied, Pg.B
+
To be added.
To be added.
@@ -28502,7 +32057,10 @@
To be added.
To be added.
- To be added.
+
+ uint32_t svqincp[_n_u32]_b16(uint32_t op, svbool_t pg)
+ UQINCP Wtied, Pg.H
+
To be added.
To be added.
@@ -28529,7 +32087,10 @@
To be added.
To be added.
- To be added.
+
+ uint32_t svqincp[_n_u32]_b32(uint32_t op, svbool_t pg)
+ UQINCP Wtied, Pg.S
+
To be added.
To be added.
@@ -28556,7 +32117,10 @@
To be added.
To be added.
- To be added.
+
+ uint32_t svqincp[_n_u32]_b64(uint32_t op, svbool_t pg)
+ UQINCP Wtied, Pg.D
+
To be added.
To be added.
@@ -28583,7 +32147,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svqincp[_n_u64]_b8(uint64_t op, svbool_t pg)
+ UQINCP Xtied, Pg.B
+
To be added.
To be added.
@@ -28610,7 +32177,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svqincp[_n_u64]_b16(uint64_t op, svbool_t pg)
+ UQINCP Xtied, Pg.H
+
To be added.
To be added.
@@ -28637,7 +32207,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svqincp[_n_u64]_b32(uint64_t op, svbool_t pg)
+ UQINCP Xtied, Pg.S
+
To be added.
To be added.
@@ -28664,7 +32237,10 @@
To be added.
To be added.
- To be added.
+
+ uint64_t svqincp[_n_u64]_b64(uint64_t op, svbool_t pg)
+ UQINCP Xtied, Pg.D
+
To be added.
To be added.
@@ -28691,7 +32267,12 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svscale[_f64]_m(svbool_t pg, svfloat64_t op1, svint64_t op2)
+ svfloat64_t svscale[_f64]_x(svbool_t pg, svfloat64_t op1, svint64_t op2)
+ svfloat64_t svscale[_f64]_z(svbool_t pg, svfloat64_t op1, svint64_t op2)
+ FSCALE Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -28718,7 +32299,12 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svscale[_f32]_m(svbool_t pg, svfloat32_t op1, svint32_t op2)
+ svfloat32_t svscale[_f32]_x(svbool_t pg, svfloat32_t op1, svint32_t op2)
+ svfloat32_t svscale[_f32]_z(svbool_t pg, svfloat32_t op1, svint32_t op2)
+ FSCALE Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -28747,7 +32333,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1_scatter[_u64base_f64](svbool_t pg, svuint64_t bases, svfloat64_t data)
+ ST1D Zdata.D, Pg, [Zbases.D, #0]
+
To be added.
@@ -28775,7 +32364,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data)
+ ST1D Zdata.D, Pg, [Zbases.D, #0]
+
To be added.
@@ -28803,7 +32395,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data)
+ ST1D Zdata.D, Pg, [Zbases.D, #0]
+
To be added.
@@ -28832,7 +32427,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1_scatter_[s64]offset[_f64](svbool_t pg, float64_t *base, svint64_t offsets, svfloat64_t data)
+ ST1D Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -28861,7 +32459,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1_scatter_[u64]offset[_f64](svbool_t pg, float64_t *base, svuint64_t offsets, svfloat64_t data)
+ ST1D Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -28890,7 +32491,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1_scatter_[s32]offset[_s32](svbool_t pg, int32_t *base, svint32_t offsets, svint32_t data)
+ ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]
+
To be added.
@@ -28919,7 +32523,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1_scatter_[u32]offset[_s32](svbool_t pg, int32_t *base, svuint32_t offsets, svint32_t data)
+ ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]
+
To be added.
@@ -28948,7 +32555,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1_scatter_[s64]offset[_s64](svbool_t pg, int64_t *base, svint64_t offsets, svint64_t data)
+ ST1D Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -28977,7 +32587,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1_scatter_[u64]offset[_s64](svbool_t pg, int64_t *base, svuint64_t offsets, svint64_t data)
+ ST1D Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -29006,7 +32619,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1_scatter_[s32]offset[_f32](svbool_t pg, float32_t *base, svint32_t offsets, svfloat32_t data)
+ ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]
+
To be added.
@@ -29035,7 +32651,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1_scatter_[u32]offset[_f32](svbool_t pg, float32_t *base, svuint32_t offsets, svfloat32_t data)
+ ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]
+
To be added.
@@ -29064,7 +32683,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1_scatter_[s32]offset[_u32](svbool_t pg, uint32_t *base, svint32_t offsets, svuint32_t data)
+ ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]
+
To be added.
@@ -29093,7 +32715,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1_scatter_[u32]offset[_u32](svbool_t pg, uint32_t *base, svuint32_t offsets, svuint32_t data)
+ ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]
+
To be added.
@@ -29122,7 +32747,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1_scatter_[s64]offset[_u64](svbool_t pg, uint64_t *base, svint64_t offsets, svuint64_t data)
+ ST1D Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -29151,7 +32779,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1_scatter_[u64]offset[_u64](svbool_t pg, uint64_t *base, svuint64_t offsets, svuint64_t data)
+ ST1D Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -29179,7 +32810,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1h_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data)
+ ST1H Zdata.D, Pg, [Zbases.D, #0]
+
To be added.
@@ -29207,7 +32841,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1h_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data)
+ ST1H Zdata.D, Pg, [Zbases.D, #0]
+
To be added.
@@ -29236,7 +32873,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1h_scatter_[s32]offset[_s32](svbool_t pg, int16_t *base, svint32_t offsets, svint32_t data)
+ ST1H Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]
+
To be added.
@@ -29265,7 +32905,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1h_scatter_[u32]offset[_s32](svbool_t pg, int16_t *base, svuint32_t offsets, svint32_t data)
+ ST1H Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]
+
To be added.
@@ -29294,7 +32937,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1h_scatter_[s64]offset[_s64](svbool_t pg, int16_t *base, svint64_t offsets, svint64_t data)
+ ST1H Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -29323,7 +32969,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1h_scatter_[u64]offset[_s64](svbool_t pg, int16_t *base, svuint64_t offsets, svint64_t data)
+ ST1H Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -29352,7 +33001,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1h_scatter_[s32]offset[_u32](svbool_t pg, uint16_t *base, svint32_t offsets, svuint32_t data)
+ ST1H Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]
+
To be added.
@@ -29381,7 +33033,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1h_scatter_[u32]offset[_u32](svbool_t pg, uint16_t *base, svuint32_t offsets, svuint32_t data)
+ ST1H Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]
+
To be added.
@@ -29410,7 +33065,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1h_scatter_[s64]offset[_u64](svbool_t pg, uint16_t *base, svint64_t offsets, svuint64_t data)
+ ST1H Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -29439,7 +33097,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1h_scatter_[u64]offset[_u64](svbool_t pg, uint16_t *base, svuint64_t offsets, svuint64_t data)
+ ST1H Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -29467,7 +33128,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1w_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data)
+ ST1W Zdata.D, Pg, [Zbases.D, #0]
+
To be added.
@@ -29495,7 +33159,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1w_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data)
+ ST1W Zdata.D, Pg, [Zbases.D, #0]
+
To be added.
@@ -29524,7 +33191,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1w_scatter_[s64]offset[_s64](svbool_t pg, int32_t *base, svint64_t offsets, svint64_t data)
+ ST1W Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -29553,7 +33223,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1w_scatter_[u64]offset[_s64](svbool_t pg, int32_t *base, svuint64_t offsets, svint64_t data)
+ ST1W Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -29582,7 +33255,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1w_scatter_[s64]offset[_u64](svbool_t pg, uint32_t *base, svint64_t offsets, svuint64_t data)
+ ST1W Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -29611,7 +33287,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1w_scatter_[u64]offset[_u64](svbool_t pg, uint32_t *base, svuint64_t offsets, svuint64_t data)
+ ST1W Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -29639,7 +33318,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1b_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data)
+ ST1B Zdata.D, Pg, [Zbases.D, #0]
+
To be added.
@@ -29667,7 +33349,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1b_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data)
+ ST1B Zdata.D, Pg, [Zbases.D, #0]
+
To be added.
@@ -29696,7 +33381,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1b_scatter_[s32]offset[_s32](svbool_t pg, int8_t *base, svint32_t offsets, svint32_t data)
+ ST1B Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]
+
To be added.
@@ -29725,7 +33413,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1b_scatter_[u32]offset[_s32](svbool_t pg, int8_t *base, svuint32_t offsets, svint32_t data)
+ ST1B Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]
+
To be added.
@@ -29754,7 +33445,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1b_scatter_[s64]offset[_s64](svbool_t pg, int8_t *base, svint64_t offsets, svint64_t data)
+ ST1B Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -29783,7 +33477,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1b_scatter_[u64]offset[_s64](svbool_t pg, int8_t *base, svuint64_t offsets, svint64_t data)
+ ST1B Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -29812,7 +33509,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1b_scatter_[s32]offset[_u32](svbool_t pg, uint8_t *base, svint32_t offsets, svuint32_t data)
+ ST1B Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]
+
To be added.
@@ -29841,7 +33541,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1b_scatter_[u32]offset[_u32](svbool_t pg, uint8_t *base, svuint32_t offsets, svuint32_t data)
+ ST1B Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]
+
To be added.
@@ -29870,7 +33573,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1b_scatter_[s64]offset[_u64](svbool_t pg, uint8_t *base, svint64_t offsets, svuint64_t data)
+ ST1B Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -29899,7 +33605,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1b_scatter_[u64]offset[_u64](svbool_t pg, uint8_t *base, svuint64_t offsets, svuint64_t data)
+ ST1B Zdata.D, Pg, [Xbase, Zoffsets.D]
+
To be added.
@@ -29925,7 +33634,12 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svlsl[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svlsl[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svlsl[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+
To be added.
To be added.
@@ -29952,7 +33666,12 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svlsl_wide[_u8]_m(svbool_t pg, svuint8_t op1, svuint64_t op2)
+ svuint8_t svlsl_wide[_u8]_x(svbool_t pg, svuint8_t op1, svuint64_t op2)
+ svuint8_t svlsl_wide[_u8]_z(svbool_t pg, svuint8_t op1, svuint64_t op2)
+ LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.D
+
To be added.
To be added.
@@ -29979,7 +33698,12 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svlsl[_s16]_m(svbool_t pg, svint16_t op1, svuint16_t op2)
+ svint16_t svlsl[_s16]_x(svbool_t pg, svint16_t op1, svuint16_t op2)
+ svint16_t svlsl[_s16]_z(svbool_t pg, svint16_t op1, svuint16_t op2)
+ LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+
To be added.
To be added.
@@ -30006,7 +33730,12 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svlsl_wide[_s16]_m(svbool_t pg, svint16_t op1, svuint64_t op2)
+ svint16_t svlsl_wide[_s16]_x(svbool_t pg, svint16_t op1, svuint64_t op2)
+ svint16_t svlsl_wide[_s16]_z(svbool_t pg, svint16_t op1, svuint64_t op2)
+ LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.D
+
To be added.
To be added.
@@ -30033,7 +33762,12 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svlsl[_s32]_m(svbool_t pg, svint32_t op1, svuint32_t op2)
+ svint32_t svlsl[_s32]_x(svbool_t pg, svint32_t op1, svuint32_t op2)
+ svint32_t svlsl[_s32]_z(svbool_t pg, svint32_t op1, svuint32_t op2)
+ LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -30060,7 +33794,12 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svlsl_wide[_s32]_m(svbool_t pg, svint32_t op1, svuint64_t op2)
+ svint32_t svlsl_wide[_s32]_x(svbool_t pg, svint32_t op1, svuint64_t op2)
+ svint32_t svlsl_wide[_s32]_z(svbool_t pg, svint32_t op1, svuint64_t op2)
+ LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.D
+
To be added.
To be added.
@@ -30087,7 +33826,12 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svlsl[_s64]_m(svbool_t pg, svint64_t op1, svuint64_t op2)
+ svint64_t svlsl[_s64]_x(svbool_t pg, svint64_t op1, svuint64_t op2)
+ svint64_t svlsl[_s64]_z(svbool_t pg, svint64_t op1, svuint64_t op2)
+ LSL Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -30114,7 +33858,12 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svlsl[_s8]_m(svbool_t pg, svint8_t op1, svuint8_t op2)
+ svint8_t svlsl[_s8]_x(svbool_t pg, svint8_t op1, svuint8_t op2)
+ svint8_t svlsl[_s8]_z(svbool_t pg, svint8_t op1, svuint8_t op2)
+ LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+
To be added.
To be added.
@@ -30141,7 +33890,12 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svlsl_wide[_s8]_m(svbool_t pg, svint8_t op1, svuint64_t op2)
+ svint8_t svlsl_wide[_s8]_x(svbool_t pg, svint8_t op1, svuint64_t op2)
+ svint8_t svlsl_wide[_s8]_z(svbool_t pg, svint8_t op1, svuint64_t op2)
+ LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.D
+
To be added.
To be added.
@@ -30168,7 +33922,12 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svlsl[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svlsl[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svlsl[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+
To be added.
To be added.
@@ -30195,7 +33954,12 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svlsl_wide[_u16]_m(svbool_t pg, svuint16_t op1, svuint64_t op2)
+ svuint16_t svlsl_wide[_u16]_x(svbool_t pg, svuint16_t op1, svuint64_t op2)
+ svuint16_t svlsl_wide[_u16]_z(svbool_t pg, svuint16_t op1, svuint64_t op2)
+ LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.D
+
To be added.
To be added.
@@ -30222,7 +33986,12 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svlsl[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svlsl[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svlsl[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -30249,7 +34018,12 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svlsl_wide[_u32]_m(svbool_t pg, svuint32_t op1, svuint64_t op2)
+ svuint32_t svlsl_wide[_u32]_x(svbool_t pg, svuint32_t op1, svuint64_t op2)
+ svuint32_t svlsl_wide[_u32]_z(svbool_t pg, svuint32_t op1, svuint64_t op2)
+ LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.D
+
To be added.
To be added.
@@ -30276,7 +34050,12 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svlsl[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svlsl[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svlsl[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ LSL Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -30303,7 +34082,12 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svasr[_s16]_m(svbool_t pg, svint16_t op1, svuint16_t op2)
+ svint16_t svasr[_s16]_x(svbool_t pg, svint16_t op1, svuint16_t op2)
+ svint16_t svasr[_s16]_z(svbool_t pg, svint16_t op1, svuint16_t op2)
+ ASR Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+
To be added.
To be added.
@@ -30330,7 +34114,12 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svasr_wide[_s16]_m(svbool_t pg, svint16_t op1, svuint64_t op2)
+ svint16_t svasr_wide[_s16]_x(svbool_t pg, svint16_t op1, svuint64_t op2)
+ svint16_t svasr_wide[_s16]_z(svbool_t pg, svint16_t op1, svuint64_t op2)
+ ASR Ztied1.H, Pg/M, Ztied1.H, Zop2.D
+
To be added.
To be added.
@@ -30357,7 +34146,12 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svasr[_s32]_m(svbool_t pg, svint32_t op1, svuint32_t op2)
+ svint32_t svasr[_s32]_x(svbool_t pg, svint32_t op1, svuint32_t op2)
+ svint32_t svasr[_s32]_z(svbool_t pg, svint32_t op1, svuint32_t op2)
+ ASR Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -30384,7 +34178,12 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svasr_wide[_s32]_m(svbool_t pg, svint32_t op1, svuint64_t op2)
+ svint32_t svasr_wide[_s32]_x(svbool_t pg, svint32_t op1, svuint64_t op2)
+ svint32_t svasr_wide[_s32]_z(svbool_t pg, svint32_t op1, svuint64_t op2)
+ ASR Ztied1.S, Pg/M, Ztied1.S, Zop2.D
+
To be added.
To be added.
@@ -30411,7 +34210,12 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svasr[_s64]_m(svbool_t pg, svint64_t op1, svuint64_t op2)
+ svint64_t svasr[_s64]_x(svbool_t pg, svint64_t op1, svuint64_t op2)
+ svint64_t svasr[_s64]_z(svbool_t pg, svint64_t op1, svuint64_t op2)
+ ASR Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -30438,7 +34242,12 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svasr[_s8]_m(svbool_t pg, svint8_t op1, svuint8_t op2)
+ svint8_t svasr[_s8]_x(svbool_t pg, svint8_t op1, svuint8_t op2)
+ svint8_t svasr[_s8]_z(svbool_t pg, svint8_t op1, svuint8_t op2)
+ ASR Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+
To be added.
To be added.
@@ -30465,7 +34274,12 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svasr_wide[_s8]_m(svbool_t pg, svint8_t op1, svuint64_t op2)
+ svint8_t svasr_wide[_s8]_x(svbool_t pg, svint8_t op1, svuint64_t op2)
+ svint8_t svasr_wide[_s8]_z(svbool_t pg, svint8_t op1, svuint64_t op2)
+ ASR Ztied1.B, Pg/M, Ztied1.B, Zop2.D
+
To be added.
To be added.
@@ -30499,7 +34313,12 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svasrd[_n_s16]_m(svbool_t pg, svint16_t op1, uint64_t imm2)
+ svint16_t svasrd[_n_s16]_x(svbool_t pg, svint16_t op1, uint64_t imm2)
+ svint16_t svasrd[_n_s16]_z(svbool_t pg, svint16_t op1, uint64_t imm2)
+ ASRD Ztied1.H, Pg/M, Ztied1.H, #imm2
+
To be added.
To be added.
@@ -30533,7 +34352,12 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svasrd[_n_s32]_m(svbool_t pg, svint32_t op1, uint64_t imm2)
+ svint32_t svasrd[_n_s32]_x(svbool_t pg, svint32_t op1, uint64_t imm2)
+ svint32_t svasrd[_n_s32]_z(svbool_t pg, svint32_t op1, uint64_t imm2)
+ ASRD Ztied1.S, Pg/M, Ztied1.S, #imm2
+
To be added.
To be added.
@@ -30567,7 +34391,12 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svasrd[_n_s64]_m(svbool_t pg, svint64_t op1, uint64_t imm2)
+ svint64_t svasrd[_n_s64]_x(svbool_t pg, svint64_t op1, uint64_t imm2)
+ svint64_t svasrd[_n_s64]_z(svbool_t pg, svint64_t op1, uint64_t imm2)
+ ASRD Ztied1.D, Pg/M, Ztied1.D, #imm2
+
To be added.
To be added.
@@ -30601,7 +34430,12 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svasrd[_n_s8]_m(svbool_t pg, svint8_t op1, uint64_t imm2)
+ svint8_t svasrd[_n_s8]_x(svbool_t pg, svint8_t op1, uint64_t imm2)
+ svint8_t svasrd[_n_s8]_z(svbool_t pg, svint8_t op1, uint64_t imm2)
+ ASRD Ztied1.B, Pg/M, Ztied1.B, #imm2
+
To be added.
To be added.
@@ -30628,7 +34462,12 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svlsr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svlsr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svlsr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ LSR Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+
To be added.
To be added.
@@ -30655,7 +34494,12 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svlsr_wide[_u8]_m(svbool_t pg, svuint8_t op1, svuint64_t op2)
+ svuint8_t svlsr_wide[_u8]_x(svbool_t pg, svuint8_t op1, svuint64_t op2)
+ svuint8_t svlsr_wide[_u8]_z(svbool_t pg, svuint8_t op1, svuint64_t op2)
+ LSR Ztied1.B, Pg/M, Ztied1.B, Zop2.D
+
To be added.
To be added.
@@ -30682,7 +34526,12 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svlsr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svlsr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svlsr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ LSR Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+
To be added.
To be added.
@@ -30709,7 +34558,12 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svlsr_wide[_u16]_m(svbool_t pg, svuint16_t op1, svuint64_t op2)
+ svuint16_t svlsr_wide[_u16]_x(svbool_t pg, svuint16_t op1, svuint64_t op2)
+ svuint16_t svlsr_wide[_u16]_z(svbool_t pg, svuint16_t op1, svuint64_t op2)
+ LSR Ztied1.H, Pg/M, Ztied1.H, Zop2.D
+
To be added.
To be added.
@@ -30736,7 +34590,12 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svlsr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svlsr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svlsr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ LSR Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -30763,7 +34622,12 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svlsr_wide[_u32]_m(svbool_t pg, svuint32_t op1, svuint64_t op2)
+ svuint32_t svlsr_wide[_u32]_x(svbool_t pg, svuint32_t op1, svuint64_t op2)
+ svuint32_t svlsr_wide[_u32]_z(svbool_t pg, svuint32_t op1, svuint64_t op2)
+ LSR Ztied1.S, Pg/M, Ztied1.S, Zop2.D
+
To be added.
To be added.
@@ -30790,7 +34654,12 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svlsr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svlsr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svlsr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ LSR Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -30815,7 +34684,12 @@
To be added.
- To be added.
+
+ svint32_t svexth[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ svint32_t svexth[_s32]_x(svbool_t pg, svint32_t op)
+ svint32_t svexth[_s32]_z(svbool_t pg, svint32_t op)
+ SXTH Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -30840,7 +34714,12 @@
To be added.
- To be added.
+
+ svint64_t svexth[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ svint64_t svexth[_s64]_x(svbool_t pg, svint64_t op)
+ svint64_t svexth[_s64]_z(svbool_t pg, svint64_t op)
+ SXTH Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -30865,7 +34744,12 @@
To be added.
- To be added.
+
+ svint64_t svextw[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ svint64_t svextw[_s64]_x(svbool_t pg, svint64_t op)
+ svint64_t svextw[_s64]_z(svbool_t pg, svint64_t op)
+ SXTW Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -30890,7 +34774,12 @@
To be added.
- To be added.
+
+ svint16_t svextb[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+ svint16_t svextb[_s16]_x(svbool_t pg, svint16_t op)
+ svint16_t svextb[_s16]_z(svbool_t pg, svint16_t op)
+ SXTB Zresult.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -30915,7 +34804,12 @@
To be added.
- To be added.
+
+ svint32_t svextb[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ svint32_t svextb[_s32]_x(svbool_t pg, svint32_t op)
+ svint32_t svextb[_s32]_z(svbool_t pg, svint32_t op)
+ SXTB Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -30940,7 +34834,12 @@
To be added.
- To be added.
+
+ svint64_t svextb[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ svint64_t svextb[_s64]_x(svbool_t pg, svint64_t op)
+ svint64_t svextb[_s64]_z(svbool_t pg, svint64_t op)
+ SXTB Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -30965,7 +34864,10 @@
To be added.
- To be added.
+
+ svint32_t svunpklo[_s32](svint16_t op)
+ SUNPKLO Zresult.S, Zop.H
+
To be added.
To be added.
@@ -30990,7 +34892,10 @@
To be added.
- To be added.
+
+ svint64_t svunpklo[_s64](svint32_t op)
+ SUNPKLO Zresult.D, Zop.S
+
To be added.
To be added.
@@ -31015,7 +34920,10 @@
To be added.
- To be added.
+
+ svint16_t svunpklo[_s16](svint8_t op)
+ SUNPKLO Zresult.H, Zop.B
+
To be added.
To be added.
@@ -31040,7 +34948,10 @@
To be added.
- To be added.
+
+ svint32_t svunpkhi[_s32](svint16_t op)
+ SUNPKHI Zresult.S, Zop.H
+
To be added.
To be added.
@@ -31065,7 +34976,10 @@
To be added.
- To be added.
+
+ svint64_t svunpkhi[_s64](svint32_t op)
+ SUNPKHI Zresult.D, Zop.S
+
To be added.
To be added.
@@ -31090,7 +35004,10 @@
To be added.
- To be added.
+
+ svint16_t svunpkhi[_s16](svint8_t op)
+ SUNPKHI Zresult.H, Zop.B
+
To be added.
To be added.
@@ -31119,7 +35036,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint8_t svsplice[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ SPLICE Ztied1.B, Pg, Ztied1.B, Zop2.B
+
To be added.
To be added.
@@ -31148,7 +35068,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svsplice[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ SPLICE Ztied1.D, Pg, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -31177,7 +35100,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint16_t svsplice[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ SPLICE Ztied1.H, Pg, Ztied1.H, Zop2.H
+
To be added.
To be added.
@@ -31206,7 +35132,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint32_t svsplice[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+ SPLICE Ztied1.S, Pg, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -31235,7 +35164,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint64_t svsplice[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+ SPLICE Ztied1.D, Pg, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -31264,7 +35196,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svint8_t svsplice[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ SPLICE Ztied1.B, Pg, Ztied1.B, Zop2.B
+
To be added.
To be added.
@@ -31293,7 +35228,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svsplice[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ SPLICE Ztied1.S, Pg, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -31322,7 +35260,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint16_t svsplice[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ SPLICE Ztied1.H, Pg, Ztied1.H, Zop2.H
+
To be added.
To be added.
@@ -31351,7 +35292,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint32_t svsplice[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+ SPLICE Ztied1.S, Pg, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -31380,7 +35324,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svuint64_t svsplice[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+ SPLICE Ztied1.D, Pg, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -31405,7 +35352,12 @@
To be added.
- To be added.
+
+ svfloat64_t svsqrt[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ svfloat64_t svsqrt[_f64]_x(svbool_t pg, svfloat64_t op)
+ svfloat64_t svsqrt[_f64]_z(svbool_t pg, svfloat64_t op)
+ FSQRT Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -31430,7 +35382,12 @@
To be added.
- To be added.
+
+ svfloat32_t svsqrt[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ svfloat32_t svsqrt[_f32]_x(svbool_t pg, svfloat32_t op)
+ svfloat32_t svsqrt[_f32]_z(svbool_t pg, svfloat32_t op)
+ FSQRT Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -31458,7 +35415,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1[_u8](svbool_t pg, uint8_t *base, svuint8_t data)
+ ST1B Zdata.B, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -31492,7 +35452,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst4[_u8](svbool_t pg, uint8_t *base, svuint8x4_t data)
+ ST4B {Zdata0.B - Zdata3.B}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -31526,7 +35489,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst3[_u8](svbool_t pg, uint8_t *base, svuint8x3_t data)
+ ST3B {Zdata0.B - Zdata2.B}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -31560,7 +35526,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst2[_u8](svbool_t pg, uint8_t *base, svuint8x2_t data)
+ ST2B {Zdata0.B, Zdata1.B}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -31587,7 +35556,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1[_f64](svbool_t pg, float64_t *base, svfloat64_t data)
+ ST1D Zdata.D, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -31621,7 +35593,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst4[_f64](svbool_t pg, float64_t *base, svfloat64x4_t data)
+ ST4D {Zdata0.D - Zdata3.D}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -31655,7 +35630,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst3[_f64](svbool_t pg, float64_t *base, svfloat64x3_t data)
+ ST3D {Zdata0.D - Zdata2.D}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -31689,7 +35667,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst2[_f64](svbool_t pg, float64_t *base, svfloat64x2_t data)
+ ST2D {Zdata0.D, Zdata1.D}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -31716,7 +35697,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1[_s16](svbool_t pg, int16_t *base, svint16_t data)
+ ST1H Zdata.H, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -31750,7 +35734,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst4[_s16](svbool_t pg, int16_t *base, svint16x4_t data)
+ ST4H {Zdata0.H - Zdata3.H}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -31784,7 +35771,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst3[_s16](svbool_t pg, int16_t *base, svint16x3_t data)
+ ST3H {Zdata0.H - Zdata2.H}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -31818,7 +35808,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst2[_s16](svbool_t pg, int16_t *base, svint16x2_t data)
+ ST2H {Zdata0.H, Zdata1.H}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -31845,7 +35838,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1[_s32](svbool_t pg, int32_t *base, svint32_t data)
+ ST1W Zdata.S, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -31879,7 +35875,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst4[_s32](svbool_t pg, int32_t *base, svint32x4_t data)
+ ST4W {Zdata0.S - Zdata3.S}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -31913,7 +35912,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst3[_s32](svbool_t pg, int32_t *base, svint32x3_t data)
+ ST3W {Zdata0.S - Zdata2.S}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -31947,7 +35949,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst2[_s32](svbool_t pg, int32_t *base, svint32x2_t data)
+ ST2W {Zdata0.S, Zdata1.S}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -31974,7 +35979,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1[_s64](svbool_t pg, int64_t *base, svint64_t data)
+ ST1D Zdata.D, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32008,7 +36016,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst4[_s64](svbool_t pg, int64_t *base, svint64x4_t data)
+ ST4D {Zdata0.D - Zdata3.D}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32042,7 +36053,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst3[_s64](svbool_t pg, int64_t *base, svint64x3_t data)
+ ST3D {Zdata0.D - Zdata2.D}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32076,7 +36090,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst2[_s64](svbool_t pg, int64_t *base, svint64x2_t data)
+ ST2D {Zdata0.D, Zdata1.D}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32103,7 +36120,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1[_s8](svbool_t pg, int8_t *base, svint8_t data)
+ ST1B Zdata.B, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32137,7 +36157,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst4[_s8](svbool_t pg, int8_t *base, svint8x4_t data)
+ ST4B {Zdata0.B - Zdata3.B}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32171,7 +36194,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst3[_s8](svbool_t pg, int8_t *base, svint8x3_t data)
+ ST3B {Zdata0.B - Zdata2.B}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32205,7 +36231,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst2[_s8](svbool_t pg, int8_t *base, svint8x2_t data)
+ ST2B {Zdata0.B, Zdata1.B}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32232,7 +36261,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1[_f32](svbool_t pg, float32_t *base, svfloat32_t data)
+ ST1W Zdata.S, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32266,7 +36298,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst4[_f32](svbool_t pg, float32_t *base, svfloat32x4_t data)
+ ST4W {Zdata0.S - Zdata3.S}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32300,7 +36335,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst3[_f32](svbool_t pg, float32_t *base, svfloat32x3_t data)
+ ST3W {Zdata0.S - Zdata2.S}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32334,7 +36372,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst2[_f32](svbool_t pg, float32_t *base, svfloat32x2_t data)
+ ST2W {Zdata0.S, Zdata1.S}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32361,7 +36402,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1[_u16](svbool_t pg, uint16_t *base, svuint16_t data)
+ ST1H Zdata.H, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32395,7 +36439,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst4[_u16](svbool_t pg, uint16_t *base, svuint16x4_t data)
+ ST4H {Zdata0.H - Zdata3.H}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32429,7 +36476,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst3[_u16](svbool_t pg, uint16_t *base, svuint16x3_t data)
+ ST3H {Zdata0.H - Zdata2.H}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32463,7 +36513,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst2[_u16](svbool_t pg, uint16_t *base, svuint16x2_t data)
+ ST2H {Zdata0.H, Zdata1.H}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32490,7 +36543,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1[_u32](svbool_t pg, uint32_t *base, svuint32_t data)
+ ST1W Zdata.S, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32524,7 +36580,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst4[_u32](svbool_t pg, uint32_t *base, svuint32x4_t data)
+ ST4W {Zdata0.S - Zdata3.S}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32558,7 +36617,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst3[_u32](svbool_t pg, uint32_t *base, svuint32x3_t data)
+ ST3W {Zdata0.S - Zdata2.S}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32592,7 +36654,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst2[_u32](svbool_t pg, uint32_t *base, svuint32x2_t data)
+ ST2W {Zdata0.S, Zdata1.S}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32619,7 +36684,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1[_u64](svbool_t pg, uint64_t *base, svuint64_t data)
+ ST1D Zdata.D, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32653,7 +36721,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst4[_u64](svbool_t pg, uint64_t *base, svuint64x4_t data)
+ ST4D {Zdata0.D - Zdata3.D}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32687,7 +36758,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst3[_u64](svbool_t pg, uint64_t *base, svuint64x3_t data)
+ ST3D {Zdata0.D - Zdata2.D}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32721,7 +36795,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst2[_u64](svbool_t pg, uint64_t *base, svuint64x2_t data)
+ ST2D {Zdata0.D, Zdata1.D}, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32748,7 +36825,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1b[_s16](svbool_t pg, int8_t *base, svint16_t data)
+ ST1B Zdata.H, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32775,7 +36855,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1h[_s32](svbool_t pg, int16_t *base, svint32_t data)
+ ST1H Zdata.S, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32802,7 +36885,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1b[_s32](svbool_t pg, int8_t *base, svint32_t data)
+ ST1B Zdata.S, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32829,7 +36915,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1h[_s64](svbool_t pg, int16_t *base, svint64_t data)
+ ST1H Zdata.D, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32856,7 +36945,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1w[_s64](svbool_t pg, int32_t *base, svint64_t data)
+ ST1W Zdata.D, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32883,7 +36975,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1b[_s64](svbool_t pg, int8_t *base, svint64_t data)
+ ST1B Zdata.D, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32910,7 +37005,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1b[_u16](svbool_t pg, uint8_t *base, svuint16_t data)
+ ST1B Zdata.H, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32937,7 +37035,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1b[_u32](svbool_t pg, uint8_t *base, svuint32_t data)
+ ST1B Zdata.S, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32964,7 +37065,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1h[_u32](svbool_t pg, uint16_t *base, svuint32_t data)
+ ST1H Zdata.S, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -32991,7 +37095,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1b[_u64](svbool_t pg, uint8_t *base, svuint64_t data)
+ ST1B Zdata.D, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -33018,7 +37125,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1h[_u64](svbool_t pg, uint16_t *base, svuint64_t data)
+ ST1H Zdata.D, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -33045,7 +37155,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svst1w[_u64](svbool_t pg, uint32_t *base, svuint64_t data)
+ ST1W Zdata.D, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -33072,7 +37185,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svstnt1[_u8](svbool_t pg, uint8_t *base, svuint8_t data)
+ STNT1B Zdata.B, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -33099,7 +37215,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svstnt1[_f64](svbool_t pg, float64_t *base, svfloat64_t data)
+ STNT1D Zdata.D, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -33126,7 +37245,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svstnt1[_s16](svbool_t pg, int16_t *base, svint16_t data)
+ STNT1H Zdata.H, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -33153,7 +37275,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svstnt1[_s32](svbool_t pg, int32_t *base, svint32_t data)
+ STNT1W Zdata.S, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -33180,7 +37305,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svstnt1[_s64](svbool_t pg, int64_t *base, svint64_t data)
+ STNT1D Zdata.D, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -33207,7 +37335,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svstnt1[_s8](svbool_t pg, int8_t *base, svint8_t data)
+ STNT1B Zdata.B, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -33234,7 +37365,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svstnt1[_f32](svbool_t pg, float32_t *base, svfloat32_t data)
+ STNT1W Zdata.S, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -33261,7 +37395,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svstnt1[_u16](svbool_t pg, uint16_t *base, svuint16_t data)
+ STNT1H Zdata.H, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -33288,7 +37425,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svstnt1[_u32](svbool_t pg, uint32_t *base, svuint32_t data)
+ STNT1W Zdata.S, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -33315,7 +37455,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ void svstnt1[_u64](svbool_t pg, uint64_t *base, svuint64_t data)
+ STNT1D Zdata.D, Pg, [Xbase, #0, MUL VL]
+
To be added.
@@ -33341,7 +37484,12 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svsub[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svsub[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t svsub[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ SUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+
To be added.
To be added.
@@ -33368,7 +37516,12 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svsub[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svsub[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ svfloat64_t svsub[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ FSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -33395,7 +37548,12 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svsub[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svsub[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t svsub[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ SUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+
To be added.
To be added.
@@ -33422,7 +37580,12 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svsub[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svsub[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t svsub[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ SUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -33449,7 +37612,12 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svsub[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svsub[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t svsub[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ SUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -33476,7 +37644,12 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svsub[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svsub[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t svsub[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ SUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+
To be added.
To be added.
@@ -33503,7 +37676,12 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svsub[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svsub[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ svfloat32_t svsub[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ FSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -33530,7 +37708,12 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svsub[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svsub[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t svsub[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ SUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+
To be added.
To be added.
@@ -33557,7 +37740,12 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svsub[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svsub[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t svsub[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ SUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -33584,7 +37772,12 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svsub[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svsub[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t svsub[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ SUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -33611,7 +37804,10 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svqsub[_u8](svuint8_t op1, svuint8_t op2)
+ UQSUB Zresult.B, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -33638,7 +37834,10 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svqsub[_s16](svint16_t op1, svint16_t op2)
+ SQSUB Zresult.H, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -33665,7 +37864,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svqsub[_s32](svint32_t op1, svint32_t op2)
+ SQSUB Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -33692,7 +37894,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svqsub[_s64](svint64_t op1, svint64_t op2)
+ SQSUB Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -33719,7 +37924,10 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svqsub[_s8](svint8_t op1, svint8_t op2)
+ SQSUB Zresult.B, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -33746,7 +37954,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svqsub[_u16](svuint16_t op1, svuint16_t op2)
+ UQSUB Zresult.H, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -33773,7 +37984,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svqsub[_u32](svuint32_t op1, svuint32_t op2)
+ UQSUB Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -33800,7 +38014,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svqsub[_u64](svuint64_t op1, svuint64_t op2)
+ UQSUB Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -33827,7 +38044,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_any(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -33854,7 +38074,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_any(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -33881,7 +38104,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_any(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -33908,7 +38134,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_any(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -33935,7 +38164,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_any(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -33962,7 +38194,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_any(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -33989,7 +38224,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_any(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34016,7 +38254,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_any(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34043,7 +38284,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_first(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34070,7 +38314,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_first(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34097,7 +38344,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_first(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34124,7 +38374,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_first(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34151,7 +38404,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_first(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34178,7 +38434,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_first(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34205,7 +38464,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_first(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34232,7 +38494,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_first(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34259,7 +38524,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_last(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34286,7 +38554,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_last(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34313,7 +38584,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_last(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34340,7 +38614,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_last(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34367,7 +38644,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_last(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34394,7 +38674,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_last(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34421,7 +38704,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_last(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34448,7 +38734,10 @@
To be added.
To be added.
- To be added.
+
+ bool svptest_last(svbool_t pg, svbool_t op)
+ PTEST
+
To be added.
To be added.
@@ -34475,7 +38764,10 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svtrn1[_u8](svuint8_t op1, svuint8_t op2)
+ TRN1 Zresult.B, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -34502,7 +38794,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svtrn1[_f64](svfloat64_t op1, svfloat64_t op2)
+ TRN1 Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -34529,7 +38824,10 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svtrn1[_s16](svint16_t op1, svint16_t op2)
+ TRN1 Zresult.H, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -34556,7 +38854,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svtrn1[_s32](svint32_t op1, svint32_t op2)
+ TRN1 Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -34583,7 +38884,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svtrn1[_s64](svint64_t op1, svint64_t op2)
+ TRN1 Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -34610,7 +38914,10 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svtrn1[_s8](svint8_t op1, svint8_t op2)
+ TRN1 Zresult.B, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -34637,7 +38944,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svtrn1[_f32](svfloat32_t op1, svfloat32_t op2)
+ TRN1 Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -34664,7 +38974,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svtrn1[_u16](svuint16_t op1, svuint16_t op2)
+ TRN1 Zresult.H, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -34691,7 +39004,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svtrn1[_u32](svuint32_t op1, svuint32_t op2)
+ TRN1 Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -34718,7 +39034,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svtrn1[_u64](svuint64_t op1, svuint64_t op2)
+ TRN1 Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -34745,7 +39064,10 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svtrn2[_u8](svuint8_t op1, svuint8_t op2)
+ TRN2 Zresult.B, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -34772,7 +39094,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svtrn2[_f64](svfloat64_t op1, svfloat64_t op2)
+ TRN2 Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -34799,7 +39124,10 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svtrn2[_s16](svint16_t op1, svint16_t op2)
+ TRN2 Zresult.H, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -34826,7 +39154,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svtrn2[_s32](svint32_t op1, svint32_t op2)
+ TRN2 Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -34853,7 +39184,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svtrn2[_s64](svint64_t op1, svint64_t op2)
+ TRN2 Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -34880,7 +39214,10 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svtrn2[_s8](svint8_t op1, svint8_t op2)
+ TRN2 Zresult.B, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -34907,7 +39244,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svtrn2[_f32](svfloat32_t op1, svfloat32_t op2)
+ TRN2 Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -34934,7 +39274,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svtrn2[_u16](svuint16_t op1, svuint16_t op2)
+ TRN2 Zresult.H, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -34961,7 +39304,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svtrn2[_u32](svuint32_t op1, svuint32_t op2)
+ TRN2 Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -34988,7 +39334,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svtrn2[_u64](svuint64_t op1, svuint64_t op2)
+ TRN2 Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -35024,7 +39373,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat64_t svtmad[_f64](svfloat64_t op1, svfloat64_t op2, uint64_t imm3)
+ FTMAD Ztied1.D, Ztied1.D, Zop2.D, #imm3
+
To be added.
To be added.
@@ -35060,7 +39412,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ svfloat32_t svtmad[_f32](svfloat32_t op1, svfloat32_t op2, uint64_t imm3)
+ FTMAD Ztied1.S, Ztied1.S, Zop2.S, #imm3
+
To be added.
To be added.
@@ -35087,7 +39442,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svtssel[_f64](svfloat64_t op1, svuint64_t op2)
+ FTSSEL Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -35114,7 +39472,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svtssel[_f32](svfloat32_t op1, svuint32_t op2)
+ FTSSEL Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -35141,7 +39502,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svtsmul[_f64](svfloat64_t op1, svuint64_t op2)
+ FTSMUL Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -35168,7 +39532,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svtsmul[_f32](svfloat32_t op1, svuint32_t op2)
+ FTSMUL Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -35195,7 +39562,10 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svuzp1[_u8](svuint8_t op1, svuint8_t op2)
+ svbool_t svuzp1_b8(svbool_t op1, svbool_t op2)
+
To be added.
To be added.
@@ -35222,7 +39592,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svuzp1[_f64](svfloat64_t op1, svfloat64_t op2)
+ UZP1 Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -35249,7 +39622,10 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svuzp1[_s16](svint16_t op1, svint16_t op2)
+ UZP1 Zresult.H, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -35276,7 +39652,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svuzp1[_s32](svint32_t op1, svint32_t op2)
+ UZP1 Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -35303,7 +39682,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svuzp1[_s64](svint64_t op1, svint64_t op2)
+ UZP1 Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -35330,7 +39712,10 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svuzp1[_s8](svint8_t op1, svint8_t op2)
+ UZP1 Zresult.B, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -35357,7 +39742,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svuzp1[_f32](svfloat32_t op1, svfloat32_t op2)
+ UZP1 Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -35384,7 +39772,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svuzp1[_u16](svuint16_t op1, svuint16_t op2)
+ UZP1 Zresult.H, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -35411,7 +39802,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svuzp1[_u32](svuint32_t op1, svuint32_t op2)
+ UZP1 Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -35438,7 +39832,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svuzp1[_u64](svuint64_t op1, svuint64_t op2)
+ UZP1 Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -35465,7 +39862,10 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svuzp2[_u8](svuint8_t op1, svuint8_t op2)
+ UZP2 Zresult.B, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -35492,7 +39892,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svuzp2[_f64](svfloat64_t op1, svfloat64_t op2)
+ UZP2 Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -35519,7 +39922,10 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svuzp2[_s16](svint16_t op1, svint16_t op2)
+ UZP2 Zresult.H, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -35546,7 +39952,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svuzp2[_s32](svint32_t op1, svint32_t op2)
+ UZP2 Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -35573,7 +39982,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svuzp2[_s64](svint64_t op1, svint64_t op2)
+ UZP2 Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -35600,7 +40012,10 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svuzp2[_s8](svint8_t op1, svint8_t op2)
+ UZP2 Zresult.B, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -35627,7 +40042,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svuzp2[_f32](svfloat32_t op1, svfloat32_t op2)
+ UZP2 Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -35654,7 +40072,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svuzp2[_u16](svuint16_t op1, svuint16_t op2)
+ UZP2 Zresult.H, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -35681,7 +40102,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svuzp2[_u32](svuint32_t op1, svuint32_t op2)
+ UZP2 Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -35708,7 +40132,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svuzp2[_u64](svuint64_t op1, svuint64_t op2)
+ UZP2 Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -35735,7 +40162,10 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svtbl[_u8](svuint8_t data, svuint8_t indices)
+ TBL Zresult.B, {Zdata.B}, Zindices.B
+
To be added.
To be added.
@@ -35762,7 +40192,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svtbl[_f64](svfloat64_t data, svuint64_t indices)
+ TBL Zresult.D, {Zdata.D}, Zindices.D
+
To be added.
To be added.
@@ -35789,7 +40222,10 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svtbl[_s16](svint16_t data, svuint16_t indices)
+ TBL Zresult.H, {Zdata.H}, Zindices.H
+
To be added.
To be added.
@@ -35816,7 +40252,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svtbl[_s32](svint32_t data, svuint32_t indices)
+ TBL Zresult.S, {Zdata.S}, Zindices.S
+
To be added.
To be added.
@@ -35843,7 +40282,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svtbl[_s64](svint64_t data, svuint64_t indices)
+ TBL Zresult.D, {Zdata.D}, Zindices.D
+
To be added.
To be added.
@@ -35870,7 +40312,10 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svtbl[_s8](svint8_t data, svuint8_t indices)
+ TBL Zresult.B, {Zdata.B}, Zindices.B
+
To be added.
To be added.
@@ -35897,7 +40342,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svtbl[_f32](svfloat32_t data, svuint32_t indices)
+ TBL Zresult.S, {Zdata.S}, Zindices.S
+
To be added.
To be added.
@@ -35924,7 +40372,10 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svtbl[_u16](svuint16_t data, svuint16_t indices)
+ TBL Zresult.H, {Zdata.H}, Zindices.H
+
To be added.
To be added.
@@ -35951,7 +40402,10 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svtbl[_u32](svuint32_t data, svuint32_t indices)
+ TBL Zresult.S, {Zdata.S}, Zindices.S
+
To be added.
To be added.
@@ -35978,7 +40432,10 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svtbl[_u64](svuint64_t data, svuint64_t indices)
+ TBL Zresult.D, {Zdata.D}, Zindices.D
+
To be added.
To be added.
@@ -36005,7 +40462,12 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t sveor[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t sveor[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ svuint8_t sveor[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ EOR Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+
To be added.
To be added.
@@ -36032,7 +40494,12 @@
To be added.
To be added.
- To be added.
+
+ svint16_t sveor[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t sveor[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ svint16_t sveor[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ EOR Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+
To be added.
To be added.
@@ -36059,7 +40526,12 @@
To be added.
To be added.
- To be added.
+
+ svint32_t sveor[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t sveor[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ svint32_t sveor[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ EOR Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -36086,7 +40558,12 @@
To be added.
To be added.
- To be added.
+
+ svint64_t sveor[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t sveor[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ svint64_t sveor[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ EOR Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -36113,7 +40590,12 @@
To be added.
To be added.
- To be added.
+
+ svint8_t sveor[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t sveor[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ svint8_t sveor[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ EOR Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+
To be added.
To be added.
@@ -36140,7 +40622,12 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t sveor[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t sveor[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ svuint16_t sveor[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ EOR Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+
To be added.
To be added.
@@ -36167,7 +40654,12 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t sveor[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t sveor[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ svuint32_t sveor[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ EOR Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+
To be added.
To be added.
@@ -36194,7 +40686,12 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t sveor[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t sveor[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ svuint64_t sveor[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ EOR Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+
To be added.
To be added.
@@ -36219,7 +40716,10 @@
To be added.
- To be added.
+
+ uint8_t sveorv[_u8](svbool_t pg, svuint8_t op)
+ EORV Bresult, Pg, Zop.B
+
To be added.
To be added.
@@ -36244,7 +40744,10 @@
To be added.
- To be added.
+
+ int16_t sveorv[_s16](svbool_t pg, svint16_t op)
+ EORV Hresult, Pg, Zop.H
+
To be added.
To be added.
@@ -36269,7 +40772,10 @@
To be added.
- To be added.
+
+ int32_t sveorv[_s32](svbool_t pg, svint32_t op)
+ EORV Sresult, Pg, Zop.S
+
To be added.
To be added.
@@ -36294,7 +40800,10 @@
To be added.
- To be added.
+
+ int64_t sveorv[_s64](svbool_t pg, svint64_t op)
+ EORV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -36319,7 +40828,10 @@
To be added.
- To be added.
+
+ int8_t sveorv[_s8](svbool_t pg, svint8_t op)
+ EORV Bresult, Pg, Zop.B
+
To be added.
To be added.
@@ -36344,7 +40856,10 @@
To be added.
- To be added.
+
+ uint16_t sveorv[_u16](svbool_t pg, svuint16_t op)
+ EORV Hresult, Pg, Zop.H
+
To be added.
To be added.
@@ -36369,7 +40884,10 @@
To be added.
- To be added.
+
+ uint32_t sveorv[_u32](svbool_t pg, svuint32_t op)
+ EORV Sresult, Pg, Zop.S
+
To be added.
To be added.
@@ -36394,7 +40912,10 @@
To be added.
- To be added.
+
+ uint64_t sveorv[_u64](svbool_t pg, svuint64_t op)
+ EORV Dresult, Pg, Zop.D
+
To be added.
To be added.
@@ -36419,7 +40940,12 @@
To be added.
- To be added.
+
+ svuint32_t svexth[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ svuint32_t svexth[_u32]_x(svbool_t pg, svuint32_t op)
+ svuint32_t svexth[_u32]_z(svbool_t pg, svuint32_t op)
+ UXTH Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -36444,7 +40970,12 @@
To be added.
- To be added.
+
+ svuint64_t svexth[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ svuint64_t svexth[_u64]_x(svbool_t pg, svuint64_t op)
+ svuint64_t svexth[_u64]_z(svbool_t pg, svuint64_t op)
+ UXTH Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -36469,7 +41000,12 @@
To be added.
- To be added.
+
+ svuint64_t svextw[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ svuint64_t svextw[_u64]_x(svbool_t pg, svuint64_t op)
+ svuint64_t svextw[_u64]_z(svbool_t pg, svuint64_t op)
+ UXTW Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -36494,7 +41030,12 @@
To be added.
- To be added.
+
+ svuint16_t svextb[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op)
+ svuint16_t svextb[_u16]_x(svbool_t pg, svuint16_t op)
+ svuint16_t svextb[_u16]_z(svbool_t pg, svuint16_t op)
+ UXTB Zresult.H, Pg/M, Zop.H
+
To be added.
To be added.
@@ -36519,7 +41060,12 @@
To be added.
- To be added.
+
+ svuint32_t svextb[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ svuint32_t svextb[_u32]_x(svbool_t pg, svuint32_t op)
+ svuint32_t svextb[_u32]_z(svbool_t pg, svuint32_t op)
+ UXTB Zresult.S, Pg/M, Zop.S
+
To be added.
To be added.
@@ -36544,7 +41090,12 @@
To be added.
- To be added.
+
+ svuint64_t svextb[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ svuint64_t svextb[_u64]_x(svbool_t pg, svuint64_t op)
+ svuint64_t svextb[_u64]_z(svbool_t pg, svuint64_t op)
+ UXTB Zresult.D, Pg/M, Zop.D
+
To be added.
To be added.
@@ -36569,7 +41120,10 @@
To be added.
- To be added.
+
+ svuint16_t svunpklo[_u16](svuint8_t op)
+ UUNPKLO Zresult.H, Zop.B
+
To be added.
To be added.
@@ -36594,7 +41148,10 @@
To be added.
- To be added.
+
+ svuint32_t svunpklo[_u32](svuint16_t op)
+ UUNPKLO Zresult.S, Zop.H
+
To be added.
To be added.
@@ -36619,7 +41176,10 @@
To be added.
- To be added.
+
+ svuint64_t svunpklo[_u64](svuint32_t op)
+ UUNPKLO Zresult.D, Zop.S
+
To be added.
To be added.
@@ -36644,7 +41204,10 @@
To be added.
- To be added.
+
+ svuint16_t svunpkhi[_u16](svuint8_t op)
+ UUNPKHI Zresult.H, Zop.B
+
To be added.
To be added.
@@ -36669,7 +41232,10 @@
To be added.
- To be added.
+
+ svuint32_t svunpkhi[_u32](svuint16_t op)
+ UUNPKHI Zresult.S, Zop.H
+
To be added.
To be added.
@@ -36694,7 +41260,10 @@
To be added.
- To be added.
+
+ svuint64_t svunpkhi[_u64](svuint32_t op)
+ UUNPKHI Zresult.D, Zop.S
+
To be added.
To be added.
@@ -36721,7 +41290,10 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svzip2[_u8](svuint8_t op1, svuint8_t op2)
+ ZIP2 Zresult.B, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -36748,7 +41320,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svzip2[_f64](svfloat64_t op1, svfloat64_t op2)
+ ZIP2 Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -36775,7 +41350,10 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svzip2[_s16](svint16_t op1, svint16_t op2)
+ ZIP2 Zresult.H, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -36802,7 +41380,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svzip2[_s32](svint32_t op1, svint32_t op2)
+ ZIP2 Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -36829,7 +41410,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svzip2[_s64](svint64_t op1, svint64_t op2)
+ ZIP2 Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -36856,7 +41440,10 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svzip2[_s8](svint8_t op1, svint8_t op2)
+ ZIP2 Zresult.B, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -36883,7 +41470,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svzip2[_f32](svfloat32_t op1, svfloat32_t op2)
+ ZIP2 Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -36910,7 +41500,12 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svzip2[_u16](svuint16_t op1, svuint16_t op2)
+ ZIP2 Zresult.H, Zop1.H, Zop2.H
+ svbool_t svzip2_b16(svbool_t op1, svbool_t op2)
+ ZIP2 Presult.H, Pop1.H, Pop2.H
+
To be added.
To be added.
@@ -36937,7 +41532,12 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svzip2[_u32](svuint32_t op1, svuint32_t op2)
+ ZIP2 Zresult.S, Zop1.S, Zop2.S
+ svbool_t svzip2_b32(svbool_t op1, svbool_t op2)
+ ZIP2 Presult.S, Pop1.S, Pop2.S
+
To be added.
To be added.
@@ -36964,7 +41564,12 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svzip2[_u64](svuint64_t op1, svuint64_t op2)
+ ZIP2 Zresult.D, Zop1.D, Zop2.D
+ svbool_t svzip2_b64(svbool_t op1, svbool_t op2)
+ ZIP2 Presult.D, Pop1.D, Pop2.D
+
To be added.
To be added.
@@ -36991,7 +41596,12 @@
To be added.
To be added.
- To be added.
+
+ svuint8_t svzip1[_u8](svuint8_t op1, svuint8_t op2)
+ ZIP1 Zresult.B, Zop1.B, Zop2.B
+ svbool_t svzip1_b8(svbool_t op1, svbool_t op2)
+ ZIP1 Presult.B, Pop1.B, Pop2.B
+
To be added.
To be added.
@@ -37018,7 +41628,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat64_t svzip1[_f64](svfloat64_t op1, svfloat64_t op2)
+ ZIP1 Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -37045,7 +41658,10 @@
To be added.
To be added.
- To be added.
+
+ svint16_t svzip1[_s16](svint16_t op1, svint16_t op2)
+ ZIP1 Zresult.H, Zop1.H, Zop2.H
+
To be added.
To be added.
@@ -37072,7 +41688,10 @@
To be added.
To be added.
- To be added.
+
+ svint32_t svzip1[_s32](svint32_t op1, svint32_t op2)
+ ZIP1 Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -37099,7 +41718,10 @@
To be added.
To be added.
- To be added.
+
+ svint64_t svzip1[_s64](svint64_t op1, svint64_t op2)
+ ZIP1 Zresult.D, Zop1.D, Zop2.D
+
To be added.
To be added.
@@ -37126,7 +41748,10 @@
To be added.
To be added.
- To be added.
+
+ svint8_t svzip1[_s8](svint8_t op1, svint8_t op2)
+ ZIP1 Zresult.B, Zop1.B, Zop2.B
+
To be added.
To be added.
@@ -37153,7 +41778,10 @@
To be added.
To be added.
- To be added.
+
+ svfloat32_t svzip1[_f32](svfloat32_t op1, svfloat32_t op2)
+ ZIP1 Zresult.S, Zop1.S, Zop2.S
+
To be added.
To be added.
@@ -37180,7 +41808,12 @@
To be added.
To be added.
- To be added.
+
+ svuint16_t svzip1[_u16](svuint16_t op1, svuint16_t op2)
+ ZIP1 Zresult.H, Zop1.H, Zop2.H
+ svbool_t svzip1_b16(svbool_t op1, svbool_t op2)
+ ZIP1 Presult.H, Pop1.H, Pop2.H
+
To be added.
To be added.
@@ -37207,7 +41840,12 @@
To be added.
To be added.
- To be added.
+
+ svuint32_t svzip1[_u32](svuint32_t op1, svuint32_t op2)
+ ZIP1 Zresult.S, Zop1.S, Zop2.S
+ svbool_t svzip1_b32(svbool_t op1, svbool_t op2)
+ ZIP1 Presult.S, Pop1.S, Pop2.S
+
To be added.
To be added.
@@ -37234,7 +41872,12 @@
To be added.
To be added.
- To be added.
+
+ svuint64_t svzip1[_u64](svuint64_t op1, svuint64_t op2)
+ ZIP1 Zresult.D, Zop1.D, Zop2.D
+ svbool_t svzip1_b64(svbool_t op1, svbool_t op2)
+ ZIP1 Presult.D, Pop1.D, Pop2.D
+
To be added.
To be added.
diff --git a/xml/System.Runtime.Intrinsics.Arm/SveMaskPattern.xml b/xml/System.Runtime.Intrinsics.Arm/SveMaskPattern.xml
index 16309c28e9b..73c6529a5c4 100644
--- a/xml/System.Runtime.Intrinsics.Arm/SveMaskPattern.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/SveMaskPattern.xml
@@ -34,7 +34,9 @@
31
- To be added.
+
+ ALL
+
@@ -54,7 +56,9 @@
30
- To be added.
+
+ MUL3
+
@@ -74,7 +78,9 @@
29
- To be added.
+
+ MUL4
+
@@ -94,7 +100,9 @@
0
- To be added.
+
+ POW2
+
@@ -114,7 +122,9 @@
1
- To be added.
+
+ VL1
+
@@ -134,7 +144,9 @@
12
- To be added.
+
+ VL128
+
@@ -154,7 +166,9 @@
9
- To be added.
+
+ VL16
+
@@ -174,7 +188,9 @@
2
- To be added.
+
+ VL2
+
@@ -194,7 +210,9 @@
13
- To be added.
+
+ VL256
+
@@ -214,7 +232,9 @@
3
- To be added.
+
+ VL3
+
@@ -234,7 +254,9 @@
10
- To be added.
+
+ VL32
+
@@ -254,7 +276,9 @@
4
- To be added.
+
+ VL4
+
@@ -274,7 +298,9 @@
5
- To be added.
+
+ VL5
+
@@ -294,7 +320,9 @@
6
- To be added.
+
+ VL6
+
@@ -314,7 +342,9 @@
11
- To be added.
+
+ VL64
+
@@ -334,7 +364,9 @@
7
- To be added.
+
+ VL7
+
@@ -354,7 +386,9 @@
8
- To be added.
+
+ VL8
+
diff --git a/xml/System.Runtime.Intrinsics.Arm/SvePrefetchType.xml b/xml/System.Runtime.Intrinsics.Arm/SvePrefetchType.xml
index cfb3f0fce0d..42839a26e01 100644
--- a/xml/System.Runtime.Intrinsics.Arm/SvePrefetchType.xml
+++ b/xml/System.Runtime.Intrinsics.Arm/SvePrefetchType.xml
@@ -34,7 +34,9 @@
1
- To be added.
+
+ PLDL1STRM
+
@@ -54,7 +56,9 @@
0
- To be added.
+
+ PLDL1KEEP
+
@@ -74,7 +78,9 @@
3
- To be added.
+
+ PLDL2STRM
+
@@ -94,7 +100,9 @@
2
- To be added.
+
+ PLDL2KEEP
+
@@ -114,7 +122,9 @@
5
- To be added.
+
+ PLDL3STRM
+
@@ -134,7 +144,9 @@
4
- To be added.
+
+ PLDL3KEEP
+
@@ -154,7 +166,9 @@
9
- To be added.
+
+ PSTL1STRM
+
@@ -174,7 +188,9 @@
8
- To be added.
+
+ PSTL1KEEP
+
@@ -194,7 +210,9 @@
11
- To be added.
+
+ PSTL2STRM
+
@@ -214,7 +232,9 @@
10
- To be added.
+
+ PSTL2KEEP
+
@@ -234,7 +254,9 @@
13
- To be added.
+
+ PSTL3STRM
+
@@ -254,7 +276,9 @@
12
- To be added.
+
+ PSTL3KEEP
+
diff --git a/xml/System.Runtime.Intrinsics.Wasm/PackedSimd.xml b/xml/System.Runtime.Intrinsics.Wasm/PackedSimd.xml
index e741c836286..d29c38e6697 100644
--- a/xml/System.Runtime.Intrinsics.Wasm/PackedSimd.xml
+++ b/xml/System.Runtime.Intrinsics.Wasm/PackedSimd.xml
@@ -5710,9 +5710,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Aes+X64.xml b/xml/System.Runtime.Intrinsics.X86/Aes+X64.xml
index 9808506dfa1..82250fdcd96 100644
--- a/xml/System.Runtime.Intrinsics.X86/Aes+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Aes+X64.xml
@@ -18,7 +18,7 @@
- To be added.
+ Provides access to the x86 AES hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -42,9 +42,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Aes.xml b/xml/System.Runtime.Intrinsics.X86/Aes.xml
index 3990eeb585e..9551b8d2286 100644
--- a/xml/System.Runtime.Intrinsics.X86/Aes.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Aes.xml
@@ -230,9 +230,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx+X64.xml b/xml/System.Runtime.Intrinsics.X86/Avx+X64.xml
index a0382ef332d..170e2ef98d1 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx+X64.xml
@@ -18,7 +18,7 @@
- To be added.
+ Provides access to the x86 AVX hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -42,9 +42,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx.xml b/xml/System.Runtime.Intrinsics.X86/Avx.xml
index 8ce9452be7f..ba603abd9d9 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx.xml
@@ -3408,9 +3408,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx10v1+V512+X64.xml b/xml/System.Runtime.Intrinsics.X86/Avx10v1+V512+X64.xml
index bd07c56beb8..db7d5d300c4 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx10v1+V512+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx10v1+V512+X64.xml
@@ -14,7 +14,7 @@
- To be added.
+ Provides access to the x86 AVX10.1/512 hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -34,9 +34,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx10v1+V512.xml b/xml/System.Runtime.Intrinsics.X86/Avx10v1+V512.xml
index 4e54aff777e..0045408d520 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx10v1+V512.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx10v1+V512.xml
@@ -14,7 +14,7 @@
- To be added.
+ Provides access to the x86 AVX10.1/512 hardware instructions via intrinsics.
To be added.
@@ -40,7 +40,10 @@
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_and_pd (__m512d a, __m512d b)
+ VANDPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -67,7 +70,10 @@
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_and_ps (__m512 a, __m512 b)
+ VANDPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -94,7 +100,10 @@
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_andnot_pd (__m512d a, __m512d b)
+ VANDNPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -121,7 +130,10 @@
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_andnot_ps (__m512 a, __m512 b)
+ VANDNPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -146,7 +158,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_broadcast_i32x2 (__m128i a)
+ VBROADCASTI32x2 zmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -171,7 +186,10 @@
To be added.
- To be added.
+
+ __m512 _mm512_broadcast_f32x2 (__m128 a)
+ VBROADCASTF32x2 zmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -196,7 +214,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_broadcast_i32x2 (__m128i a)
+ VBROADCASTI32x2 zmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -220,7 +241,10 @@
To be added.
- To be added.
+
+ __m512d _mm512_broadcast_f64x2 (__m128d const * mem_addr)
+ VBROADCASTF64x2 zmm1 {k1}{z}, m128
+
To be added.
To be added.
@@ -244,7 +268,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_broadcast_i64x2 (__m128i const * mem_addr)
+ VBROADCASTI64x2 zmm1 {k1}{z}, m128
+
To be added.
To be added.
@@ -268,7 +295,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_broadcast_i64x2 (__m128i const * mem_addr)
+ VBROADCASTI64x2 zmm1 {k1}{z}, m128
+
To be added.
To be added.
@@ -292,7 +322,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_broadcast_i32x8 (__m256i const * mem_addr)
+ VBROADCASTI32x8 zmm1 {k1}{z}, m256
+
To be added.
To be added.
@@ -316,7 +349,10 @@
To be added.
- To be added.
+
+ __m512 _mm512_broadcast_f32x8 (__m256 const * mem_addr)
+ VBROADCASTF32x8 zmm1 {k1}{z}, m256
+
To be added.
To be added.
@@ -340,7 +376,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_broadcast_i32x8 (__m256i const * mem_addr)
+ VBROADCASTI32x8 zmm1 {k1}{z}, m256
+
To be added.
To be added.
@@ -365,7 +404,10 @@
To be added.
- To be added.
+
+ __m512 _mm512_cvtepi64_ps (__m512i a)
+ VCVTQQ2PS ymm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -390,7 +432,10 @@
To be added.
- To be added.
+
+ __m512 _mm512_cvtepu64_ps (__m512i a)
+ VCVTUQQ2PS ymm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -424,7 +469,10 @@
To be added.
To be added.
- To be added.
+
+ __m256 _mm512_cvt_roundepi64_ps (__m512i a, int r)
+ VCVTQQ2PS ymm1, zmm2 {er}
+
To be added.
To be added.
@@ -458,7 +506,10 @@
To be added.
To be added.
- To be added.
+
+ __m256 _mm512_cvt_roundepu64_ps (__m512i a, int r)
+ VCVTUQQ2PS ymm1, zmm2 {er}
+
To be added.
To be added.
@@ -483,7 +534,10 @@
To be added.
- To be added.
+
+ __m512d _mm512_cvtepi64_pd (__m512i a)
+ VCVTQQ2PD zmm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -508,7 +562,10 @@
To be added.
- To be added.
+
+ __m512d _mm512_cvtepu64_pd (__m512i a)
+ VCVTUQQ2PD zmm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -542,7 +599,10 @@
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_cvt_roundepi64_pd (__m512i a, int r)
+ VCVTQQ2PD zmm1, zmm2 {er}
+
To be added.
To be added.
@@ -576,7 +636,10 @@
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_cvt_roundepu64_pd (__m512i a, int r)
+ VCVTUQQ2PD zmm1, zmm2 {er}
+
To be added.
To be added.
@@ -601,7 +664,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_cvtps_epi64 (__m512 a)
+ VCVTPS2QQ zmm1 {k1}{z}, ymm2/m256/m32bcst{er}
+
To be added.
To be added.
@@ -626,7 +692,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_cvtpd_epi64 (__m512d a)
+ VCVTPD2QQ zmm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
To be added.
To be added.
@@ -660,7 +729,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_cvt_roundps_epi64 (__m512 a, int r)
+ VCVTPS2QQ zmm1, ymm2 {er}
+
To be added.
To be added.
@@ -694,7 +766,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_cvt_roundpd_epi64 (__m512d a, int r)
+ VCVTPD2QQ zmm1, zmm2 {er}
+
To be added.
To be added.
@@ -719,7 +794,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_cvttps_epi64 (__m512 a)
+ VCVTTPS2QQ zmm1 {k1}{z}, ymm2/m256/m32bcst{sae}
+
To be added.
To be added.
@@ -744,7 +822,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_cvttpd_epi64 (__m512d a)
+ VCVTTPD2QQ zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}
+
To be added.
To be added.
@@ -769,7 +850,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_cvtps_epu64 (__m512 a)
+ VCVTPS2UQQ zmm1 {k1}{z}, ymm2/m256/m32bcst{er}
+
To be added.
To be added.
@@ -794,7 +878,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_cvtpd_epu64 (__m512d a)
+ VCVTPD2UQQ zmm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
To be added.
To be added.
@@ -828,7 +915,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_cvt_roundps_epu64 (__m512 a, int r)
+ VCVTPS2UQQ zmm1 {k1}{z}, ymm2/m256/m32bcst{er}
+
To be added.
To be added.
@@ -862,7 +952,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_cvt_roundpd_epu64 (__m512d a, int r)
+ VCVTPD2UQQ zmm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
To be added.
To be added.
@@ -887,7 +980,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_cvttps_epu64 (__m512 a)
+ VCVTTPS2UQQ zmm1 {k1}{z}, ymm2/m256/m32bcst{sae}
+
To be added.
To be added.
@@ -912,7 +1008,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_cvttpd_epu64 (__m512d a)
+ VCVTTPD2UQQ zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}
+
To be added.
To be added.
@@ -937,7 +1036,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_conflict_epi32 (__m512i a)
+ VPCONFLICTD zmm1 {k1}{z}, zmm2/m512/m32bcst
+
To be added.
To be added.
@@ -962,7 +1064,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_conflict_epi64 (__m512i a)
+ VPCONFLICTQ zmm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -987,7 +1092,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_conflict_epi32 (__m512i a)
+ VPCONFLICTD zmm1 {k1}{z}, zmm2/m512/m32bcst
+
To be added.
To be added.
@@ -1012,7 +1120,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_conflict_epi64 (__m512i a)
+ VPCONFLICTQ zmm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -1046,7 +1157,10 @@
To be added.
To be added.
- To be added.
+
+ __m128d _mm512_extractf64x2_pd (__m512d a, const int imm8)
+ VEXTRACTF64x2 xmm1/m128 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -1080,7 +1194,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm512_extracti64x2_epi64 (__m512i a, const int imm8)
+ VEXTRACTI64x2 xmm1/m128 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -1114,7 +1231,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm512_extracti64x2_epi64 (__m512i a, const int imm8)
+ VEXTRACTI64x2 xmm1/m128 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -1148,7 +1268,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm512_extracti32x8_epi32 (__m512i a, const int imm8)
+ VEXTRACTI32x8 ymm1/m256 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -1182,7 +1305,10 @@
To be added.
To be added.
- To be added.
+
+ __m256 _mm512_extractf32x8_ps (__m512 a, const int imm8)
+ VEXTRACTF32x8 ymm1/m256 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -1216,7 +1342,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm512_extracti32x8_epi32 (__m512i a, const int imm8)
+ VEXTRACTI32x8 ymm1/m256 {k1}{z}, zmm2, imm8
+
To be added.
To be added.
@@ -1252,7 +1381,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_insertf64x2_pd (__m512d a, __m128d b, int imm8)
+ VINSERTF64x2 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
To be added.
To be added.
@@ -1288,7 +1420,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_inserti64x2_si512 (__m512i a, __m128i b, const int imm8)
+ VINSERTI64x2 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
To be added.
To be added.
@@ -1324,7 +1459,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_inserti64x2_si512 (__m512i a, __m128i b, const int imm8)
+ VINSERTI64x2 zmm1 {k1}{z}, zmm2, xmm3/m128, imm8
+
To be added.
To be added.
@@ -1360,7 +1498,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_inserti32x8_si512 (__m512i a, __m256i b, const int imm8)
+ VINSERTI32x8 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
+
To be added.
To be added.
@@ -1396,7 +1537,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_insertf32x8_ps (__m512 a, __m256 b, int imm8)
+ VINSERTF32x8 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
+
To be added.
To be added.
@@ -1432,7 +1576,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_inserti32x8_si512 (__m512i a, __m256i b, const int imm8)
+ VINSERTI32x8 zmm1 {k1}{z}, zmm2, ymm3/m256, imm8
+
To be added.
To be added.
@@ -1453,9 +1600,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
@@ -1478,7 +1626,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_lzcnt_epi32 (__m512i a)
+ VPLZCNTD zmm1 {k1}{z}, zmm2/m512/m32bcst
+
To be added.
To be added.
@@ -1503,7 +1654,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_lzcnt_epi64 (__m512i a)
+ VPLZCNTQ zmm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -1528,7 +1682,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_lzcnt_epi32 (__m512i a)
+ VPLZCNTD zmm1 {k1}{z}, zmm2/m512/m32bcst
+
To be added.
To be added.
@@ -1553,7 +1710,10 @@
To be added.
- To be added.
+
+ __m512i _mm512_lzcnt_epi64 (__m512i a)
+ VPLZCNTQ zmm1 {k1}{z}, zmm2/m512/m64bcst
+
To be added.
To be added.
@@ -1580,7 +1740,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_mullo_epi64 (__m512i a, __m512i b)
+ VPMULLQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -1607,7 +1770,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_mullo_epi64 (__m512i a, __m512i b)
+ VPMULLQ zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -1634,7 +1800,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_multishift_epi64_epi8( __m512i a, __m512i b)
+ VPMULTISHIFTQB zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -1661,7 +1830,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_multishift_epi64_epi8( __m512i a, __m512i b)
+ VPMULTISHIFTQB zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -1688,7 +1860,10 @@
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_or_pd (__m512d a, __m512d b)
+ VORPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -1715,7 +1890,10 @@
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_or_ps (__m512 a, __m512 b)
+ VORPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
@@ -1742,7 +1920,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_permutevar64x8_epi8 (__m512i a, __m512i b)
+ VPERMB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -1769,7 +1950,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_permutevar64x8_epi8 (__m512i a, __m512i b)
+ VPERMB zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -1798,7 +1982,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_permutex2var_epi8 (__m512i a, __m512i idx, __m512i b)
+ VPERMI2B zmm1 {k1}{z}, zmm2, zmm3/m512
+ VPERMT2B zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -1827,7 +2015,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_permutex2var_epi8 (__m512i a, __m512i idx, __m512i b)
+ VPERMI2B zmm1 {k1}{z}, zmm2, zmm3/m512
+ VPERMT2B zmm1 {k1}{z}, zmm2, zmm3/m512
+
To be added.
To be added.
@@ -1863,7 +2055,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_range_pd(__m512d a, __m512d b, int imm);
+ VRANGEPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst{sae}, imm8
+
To be added.
To be added.
@@ -1899,7 +2094,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_range_ps(__m512 a, __m512 b, int imm);
+ VRANGEPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst{sae}, imm8
+
To be added.
To be added.
@@ -1933,7 +2131,10 @@
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_reduce_pd(__m512d a, int imm);
+ VREDUCEPD zmm1 {k1}{z}, zmm2/m512/m64bcst{sae}, imm8
+
To be added.
To be added.
@@ -1967,7 +2168,10 @@
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_reduce_ps(__m512 a, int imm);
+ VREDUCEPS zmm1 {k1}{z}, zmm2/m512/m32bcst{sae}, imm8
+
To be added.
To be added.
@@ -1994,7 +2198,10 @@
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_xor_pd (__m512d a, __m512d b)
+ VXORPD zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -2021,7 +2228,10 @@
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_xor_ps (__m512 a, __m512 b)
+ VXORPS zmm1 {k1}{z}, zmm2, zmm3/m512/m32bcst
+
To be added.
To be added.
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx10v1+X64.xml b/xml/System.Runtime.Intrinsics.X86/Avx10v1+X64.xml
index e2901556baa..ebc01b15d05 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx10v1+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx10v1+X64.xml
@@ -14,7 +14,7 @@
- To be added.
+ Provides access to the x86 AVX10.1 hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -40,7 +40,11 @@
To be added.
To be added.
- To be added.
+
+ __m128d _mm_cvtu64_sd (__m128d a, unsigned __int64 b)
+ VCVTUSI2SD xmm1, xmm2, r/m64
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -76,7 +80,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_cvt_roundsi64_sd (__m128d a, __int64 b, int rounding)
+ VCVTSI2SD xmm1, xmm2, r64 {er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -112,7 +120,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_cvt_roundu64_sd (__m128d a, unsigned __int64 b, int rounding)
+ VCVTUSI2SD xmm1, xmm2, r64 {er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -139,7 +151,11 @@
To be added.
To be added.
- To be added.
+
+ __m128 _mm_cvtu64_ss (__m128 a, unsigned __int64 b)
+ VCVTUSI2SS xmm1, xmm2, r/m64
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -175,7 +191,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_cvt_roundi64_ss (__m128 a, __int64 b, int rounding)
+ VCVTSI2SS xmm1, xmm2, r64 {er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -211,7 +231,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_cvt_roundu64_ss (__m128 a, unsigned __int64 b, int rounding)
+ VCVTUSI2SS xmm1, xmm2, r64 {er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -245,7 +269,11 @@
To be added.
To be added.
- To be added.
+
+ __int64 _mm_cvt_roundsd_i64 (__m128d a, int rounding)
+ VCVTSD2SI r64, xmm1 {er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -279,7 +307,11 @@
To be added.
To be added.
- To be added.
+
+ __int64 _mm_cvt_roundss_i64 (__m128 a, int rounding)
+ VCVTSS2SI r64, xmm1 {er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -304,7 +336,11 @@
To be added.
- To be added.
+
+ unsigned __int64 _mm_cvtsd_u64 (__m128d a)
+ VCVTSD2USI r64, xmm1/m64{er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -329,7 +365,11 @@
To be added.
- To be added.
+
+ unsigned __int64 _mm_cvtss_u64 (__m128 a)
+ VCVTSS2USI r64, xmm1/m32{er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -363,7 +403,11 @@
To be added.
To be added.
- To be added.
+
+ unsigned __int64 _mm_cvt_roundsd_u64 (__m128d a, int rounding)
+ VCVTSD2USI r64, xmm1 {er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -397,7 +441,11 @@
To be added.
To be added.
- To be added.
+
+ unsigned __int64 _mm_cvt_roundss_u64 (__m128 a, int rounding)
+ VCVTSS2USI r64, xmm1 {er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -422,7 +470,11 @@
To be added.
- To be added.
+
+ unsigned __int64 _mm_cvttsd_u64 (__m128d a)
+ VCVTTSD2USI r64, xmm1/m64{er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -447,7 +499,11 @@
To be added.
- To be added.
+
+ unsigned __int64 _mm_cvttss_u64 (__m128 a)
+ VCVTTSS2USI r64, xmm1/m32{er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -468,9 +524,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx10v1.xml b/xml/System.Runtime.Intrinsics.X86/Avx10v1.xml
index ccae47bef8e..63c1c6cd7c9 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx10v1.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx10v1.xml
@@ -20,7 +20,7 @@
- To be added.
+ Provides access to X86 AVX10.1 hardware instructions via intrinsics
To be added.
@@ -44,7 +44,10 @@
To be added.
- To be added.
+
+ __m128i _mm_abs_epi64 (__m128i a)
+ VPABSQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -69,7 +72,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_abs_epi64 (__m256i a)
+ VPABSQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -105,7 +111,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_add_round_sd (__m128d a, __m128d b, int rounding)
+ VADDSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -141,7 +150,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_add_round_ss (__m128 a, __m128 b, int rounding)
+ VADDSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -177,7 +189,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_alignr_epi32 (__m128i a, __m128i b, const int count)
+ VALIGND xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -213,7 +228,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_alignr_epi32 (__m128i a, __m128i b, const int count)
+ VALIGND xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -249,7 +267,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_alignr_epi32 (__m256i a, __m256i b, const int count)
+ VALIGND ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -285,7 +306,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_alignr_epi32 (__m256i a, __m256i b, const int count)
+ VALIGND ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -321,7 +345,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_alignr_epi64 (__m128i a, __m128i b, const int count)
+ VALIGNQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -357,7 +384,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_alignr_epi64 (__m128i a, __m128i b, const int count)
+ VALIGNQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -393,7 +423,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_alignr_epi64 (__m256i a, __m256i b, const int count)
+ VALIGNQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -429,7 +462,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_alignr_epi64 (__m256i a, __m256i b, const int count)
+ VALIGNQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -454,7 +490,10 @@
To be added.
- To be added.
+
+ __m128i _mm_broadcast_i32x2 (__m128i a)
+ VBROADCASTI32x2 xmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -479,7 +518,10 @@
To be added.
- To be added.
+
+ __m128i _mm_broadcast_i32x2 (__m128i a)
+ VBROADCASTI32x2 xmm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -504,7 +546,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_broadcast_i32x2 (__m128i a)
+ VBROADCASTI32x2 ymm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -529,7 +574,10 @@
To be added.
- To be added.
+
+ __m256 _mm256_broadcast_f32x2 (__m128 a)
+ VBROADCASTF32x2 ymm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -554,7 +602,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_broadcast_i32x2 (__m128i a)
+ VBROADCASTI32x2 ymm1 {k1}{z}, xmm2/m64
+
To be added.
To be added.
@@ -581,7 +632,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpgt_epu8 (__m128i a, __m128i b)
+ VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(6)
+
To be added.
To be added.
@@ -608,7 +662,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpgt_epu16 (__m128i a, __m128i b)
+ VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(6)
+
To be added.
To be added.
@@ -635,7 +692,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpgt_epu32 (__m128i a, __m128i b)
+ VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(6)
+
To be added.
To be added.
@@ -662,7 +722,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpgt_epu64 (__m128i a, __m128i b)
+ VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(6)
+
To be added.
To be added.
@@ -689,7 +752,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpgt_epu8 (__m256i a, __m256i b)
+ VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(6)
+
To be added.
To be added.
@@ -716,7 +782,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpgt_epu16 (__m256i a, __m256i b)
+ VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(6)
+
To be added.
To be added.
@@ -743,7 +812,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpgt_epu32 (__m256i a, __m256i b)
+ VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(6)
+
To be added.
To be added.
@@ -770,7 +842,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpgt_epu64 (__m256i a, __m256i b)
+ VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(6)
+
To be added.
To be added.
@@ -797,7 +872,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpge_epu8 (__m128i a, __m128i b)
+ VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(5)
+
To be added.
To be added.
@@ -824,7 +902,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpge_epi16 (__m128i a, __m128i b)
+ VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(5)
+
To be added.
To be added.
@@ -851,7 +932,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpge_epi32 (__m128i a, __m128i b)
+ VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(5)
+
To be added.
To be added.
@@ -878,7 +962,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpge_epi64 (__m128i a, __m128i b)
+ VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(5)
+
To be added.
To be added.
@@ -905,7 +992,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpge_epi8 (__m128i a, __m128i b)
+ VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(5)
+
To be added.
To be added.
@@ -932,7 +1022,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpge_epu16 (__m128i a, __m128i b)
+ VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(5)
+
To be added.
To be added.
@@ -959,7 +1052,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpge_epu32 (__m128i a, __m128i b)
+ VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(5)
+
To be added.
To be added.
@@ -986,7 +1082,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpge_epu64 (__m128i a, __m128i b)
+ VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(5)
+
To be added.
To be added.
@@ -1013,7 +1112,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpge_epu8 (__m256i a, __m256i b)
+ VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(5)
+
To be added.
To be added.
@@ -1040,7 +1142,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpge_epi16 (__m256i a, __m256i b)
+ VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(5)
+
To be added.
To be added.
@@ -1067,7 +1172,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpge_epi32 (__m256i a, __m256i b)
+ VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(5)
+
To be added.
To be added.
@@ -1094,7 +1202,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpge_epi64 (__m256i a, __m256i b)
+ VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(5)
+
To be added.
To be added.
@@ -1121,7 +1232,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpge_epi8 (__m256i a, __m256i b)
+ VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(5)
+
To be added.
To be added.
@@ -1148,7 +1262,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpge_epu16 (__m256i a, __m256i b)
+ VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(5)
+
To be added.
To be added.
@@ -1175,7 +1292,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpge_epu32 (__m256i a, __m256i b)
+ VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(5)
+
To be added.
To be added.
@@ -1202,7 +1322,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpge_epu64 (__m256i a, __m256i b)
+ VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(5)
+
To be added.
To be added.
@@ -1229,7 +1352,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmplt_epu8 (__m128i a, __m128i b)
+ VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(1)
+
To be added.
To be added.
@@ -1256,7 +1382,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmplt_epi16 (__m128i a, __m128i b)
+ VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(1)
+
To be added.
To be added.
@@ -1283,7 +1412,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmplt_epi32 (__m128i a, __m128i b)
+ VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(1)
+
To be added.
To be added.
@@ -1310,7 +1442,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmplt_epi64 (__m128i a, __m128i b)
+ VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(1)
+
To be added.
To be added.
@@ -1337,7 +1472,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmplt_epi8 (__m128i a, __m128i b)
+ VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(1)
+
To be added.
To be added.
@@ -1364,7 +1502,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmplt_epu16 (__m128i a, __m128i b)
+ VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(1)
+
To be added.
To be added.
@@ -1391,7 +1532,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmplt_epu32 (__m128i a, __m128i b)
+ VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(1)
+
To be added.
To be added.
@@ -1418,7 +1562,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmplt_epu64 (__m128i a, __m128i b)
+ VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(1)
+
To be added.
To be added.
@@ -1445,7 +1592,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmplt_epu8 (__m256i a, __m256i b)
+ VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(1)
+
To be added.
To be added.
@@ -1472,7 +1622,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmplt_epi16 (__m256i a, __m256i b)
+ VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(1)
+
To be added.
To be added.
@@ -1499,7 +1652,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmplt_epi32 (__m256i a, __m256i b)
+ VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(1)
+
To be added.
To be added.
@@ -1526,7 +1682,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmplt_epi64 (__m256i a, __m256i b)
+ VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(1)
+
To be added.
To be added.
@@ -1553,7 +1712,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmplt_epi8 (__m256i a, __m256i b)
+ VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(1)
+
To be added.
To be added.
@@ -1580,7 +1742,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmplt_epu16 (__m256i a, __m256i b)
+ VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(1)
+
To be added.
To be added.
@@ -1607,7 +1772,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmplt_epu32 (__m256i a, __m256i b)
+ VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(1)
+
To be added.
To be added.
@@ -1634,7 +1802,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmplt_epu64 (__m256i a, __m256i b)
+ VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(1)
+
To be added.
To be added.
@@ -1661,7 +1832,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmple_epu8 (__m128i a, __m128i b)
+ VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(2)
+
To be added.
To be added.
@@ -1688,7 +1862,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmple_epi16 (__m128i a, __m128i b)
+ VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(2)
+
To be added.
To be added.
@@ -1715,7 +1892,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmple_epi32 (__m128i a, __m128i b)
+ VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(2)
+
To be added.
To be added.
@@ -1742,7 +1922,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmple_epi64 (__m128i a, __m128i b)
+ VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(2)
+
To be added.
To be added.
@@ -1769,7 +1952,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmple_epi8 (__m128i a, __m128i b)
+ VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(2)
+
To be added.
To be added.
@@ -1796,7 +1982,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmple_epu16 (__m128i a, __m128i b)
+ VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(2)
+
To be added.
To be added.
@@ -1823,7 +2012,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmple_epu32 (__m128i a, __m128i b)
+ VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(2)
+
To be added.
To be added.
@@ -1850,7 +2042,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmple_epu64 (__m128i a, __m128i b)
+ VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(2)
+
To be added.
To be added.
@@ -1877,7 +2072,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmple_epu8 (__m256i a, __m256i b)
+ VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(2)
+
To be added.
To be added.
@@ -1904,7 +2102,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmple_epi16 (__m256i a, __m256i b)
+ VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(2)
+
To be added.
To be added.
@@ -1931,7 +2132,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmple_epi32 (__m256i a, __m256i b)
+ VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(2)
+
To be added.
To be added.
@@ -1958,7 +2162,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmple_epi64 (__m256i a, __m256i b)
+ VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(2)
+
To be added.
To be added.
@@ -1985,7 +2192,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmple_epi8 (__m256i a, __m256i b)
+ VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(2)
+
To be added.
To be added.
@@ -2012,7 +2222,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmple_epu16 (__m256i a, __m256i b)
+ VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(2)
+
To be added.
To be added.
@@ -2039,7 +2252,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmple_epu32 (__m256i a, __m256i b)
+ VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(2)
+
To be added.
To be added.
@@ -2066,7 +2282,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmple_epu64 (__m256i a, __m256i b)
+ VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(2)
+
To be added.
To be added.
@@ -2093,7 +2312,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpne_epu8 (__m128i a, __m128i b)
+ VPCMPUB k1 {k2}, xmm2, xmm3/m128, imm8(4)
+
To be added.
To be added.
@@ -2120,7 +2342,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpne_epi16 (__m128i a, __m128i b)
+ VPCMPW k1 {k2}, xmm2, xmm3/m128, imm8(4)
+
To be added.
To be added.
@@ -2147,7 +2372,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpne_epi32 (__m128i a, __m128i b)
+ VPCMPD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(4)
+
To be added.
To be added.
@@ -2174,7 +2402,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpne_epi64 (__m128i a, __m128i b)
+ VPCMPQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(4)
+
To be added.
To be added.
@@ -2201,7 +2432,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpne_epi8 (__m128i a, __m128i b)
+ VPCMPB k1 {k2}, xmm2, xmm3/m128, imm8(4)
+
To be added.
To be added.
@@ -2228,7 +2462,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpne_epu16 (__m128i a, __m128i b)
+ VPCMPUW k1 {k2}, xmm2, xmm3/m128, imm8(4)
+
To be added.
To be added.
@@ -2255,7 +2492,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpne_epu32 (__m128i a, __m128i b)
+ VPCMPUD k1 {k2}, xmm2, xmm3/m128/m32bcst, imm8(4)
+
To be added.
To be added.
@@ -2282,7 +2522,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_cmpne_epu64 (__m128i a, __m128i b)
+ VPCMPUQ k1 {k2}, xmm2, xmm3/m128/m64bcst, imm8(4)
+
To be added.
To be added.
@@ -2309,7 +2552,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpne_epu8 (__m256i a, __m256i b)
+ VPCMPUB k1 {k2}, ymm2, ymm3/m256, imm8(4)
+
To be added.
To be added.
@@ -2336,7 +2582,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpne_epi16 (__m256i a, __m256i b)
+ VPCMPW k1 {k2}, ymm2, ymm3/m256, imm8(4)
+
To be added.
To be added.
@@ -2363,7 +2612,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpne_epi32 (__m256i a, __m256i b)
+ VPCMPD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(4)
+
To be added.
To be added.
@@ -2390,7 +2642,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpne_epi64 (__m256i a, __m256i b)
+ VPCMPQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(4)
+
To be added.
To be added.
@@ -2417,7 +2672,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpne_epi8 (__m256i a, __m256i b)
+ VPCMPB k1 {k2}, ymm2, ymm3/m256, imm8(4)
+
To be added.
To be added.
@@ -2444,7 +2702,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpne_epu16 (__m256i a, __m256i b)
+ VPCMPUW k1 {k2}, ymm2, ymm3/m256, imm8(4)
+
To be added.
To be added.
@@ -2471,7 +2732,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpne_epu32 (__m256i a, __m256i b)
+ VPCMPUD k1 {k2}, ymm2, ymm3/m256/m32bcst, imm8(4)
+
To be added.
To be added.
@@ -2498,7 +2762,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_cmpne_epu64 (__m256i a, __m256i b)
+ VPCMPUQ k1 {k2}, ymm2, ymm3/m256/m64bcst, imm8(4)
+
To be added.
To be added.
@@ -2525,7 +2792,10 @@
To be added.
To be added.
- To be added.
+
+ __m128d _mm_cvtu32_sd (__m128d a, unsigned int b)
+ VCVTUSI2SD xmm1, xmm2, r/m32
+
To be added.
To be added.
@@ -2552,7 +2822,10 @@
To be added.
To be added.
- To be added.
+
+ __m128 _mm_cvtu32_ss (__m128 a, unsigned int b)
+ VCVTUSI2SS xmm1, xmm2, r/m32
+
To be added.
To be added.
@@ -2588,7 +2861,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_cvt_roundi32_ss (__m128 a, int b, int rounding)
+ VCVTSI2SS xmm1, xmm2, r32 {er}
+
To be added.
To be added.
@@ -2624,7 +2900,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_cvt_roundsd_ss (__m128 a, __m128d b, int rounding)
+ VCVTSD2SS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -2660,7 +2939,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_cvt_roundu32_ss (__m128 a, unsigned int b, int rounding)
+ VCVTUSI2SS xmm1, xmm2, r32 {er}
+
To be added.
To be added.
@@ -2694,7 +2976,10 @@
To be added.
To be added.
- To be added.
+
+ int _mm_cvt_roundsd_i32 (__m128d a, int rounding)
+ VCVTSD2SI r32, xmm1 {er}
+
To be added.
To be added.
@@ -2728,7 +3013,10 @@
To be added.
To be added.
- To be added.
+
+ int _mm_cvt_roundss_i32 (__m128 a, int rounding)
+ VCVTSS2SI r32, xmm1 {er}
+
To be added.
To be added.
@@ -2753,7 +3041,10 @@
To be added.
- To be added.
+
+ unsigned int _mm_cvtsd_u32 (__m128d a)
+ VCVTSD2USI r32, xmm1/m64{er}
+
To be added.
To be added.
@@ -2778,7 +3069,10 @@
To be added.
- To be added.
+
+ unsigned int _mm_cvtss_u32 (__m128 a)
+ VCVTSS2USI r32, xmm1/m32{er}
+
To be added.
To be added.
@@ -2812,7 +3106,10 @@
To be added.
To be added.
- To be added.
+
+ unsigned int _mm_cvt_roundsd_u32 (__m128d a, int rounding)
+ VCVTSD2USI r32, xmm1 {er}
+
To be added.
To be added.
@@ -2846,7 +3143,10 @@
To be added.
To be added.
- To be added.
+
+ unsigned int _mm_cvt_roundss_u32 (__m128 a, int rounding)
+ VCVTSS2USI r32, xmm1 {er}
+
To be added.
To be added.
@@ -2871,7 +3171,10 @@
To be added.
- To be added.
+
+ unsigned int _mm_cvttsd_u32 (__m128d a)
+ VCVTTSD2USI r32, xmm1/m64{er}
+
To be added.
To be added.
@@ -2896,7 +3199,10 @@
To be added.
- To be added.
+
+ unsigned int _mm_cvttss_u32 (__m128 a)
+ VCVTTSS2USI r32, xmm1/m32{er}
+
To be added.
To be added.
@@ -2921,7 +3227,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi16_epi8 (__m128i a)
+ VPMOVWB xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2946,7 +3255,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi32_epi8 (__m128i a)
+ VPMOVDB xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2971,7 +3283,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi64_epi8 (__m128i a)
+ VPMOVQB xmm1/m16 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -2996,7 +3311,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi16_epi8 (__m128i a)
+ VPMOVWB xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -3021,7 +3339,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi32_epi8 (__m128i a)
+ VPMOVDB xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -3046,7 +3367,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi64_epi8 (__m128i a)
+ VPMOVQB xmm1/m16 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -3071,7 +3395,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi16_epi8 (__m256i a)
+ VPMOVWB xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3096,7 +3423,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi32_epi8 (__m256i a)
+ VPMOVDB xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3121,7 +3451,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi64_epi8 (__m256i a)
+ VPMOVQB xmm1/m32 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3146,7 +3479,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi16_epi8 (__m256i a)
+ VPMOVWB xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3171,7 +3507,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi32_epi8 (__m256i a)
+ VPMOVDB xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3196,7 +3535,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi64_epi8 (__m256i a)
+ VPMOVQB xmm1/m32 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3221,7 +3563,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtusepi16_epi8 (__m128i a)
+ VPMOVUSWB xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -3246,7 +3591,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtusepi32_epi8 (__m128i a)
+ VPMOVUSDB xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -3271,7 +3619,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtusepi64_epi8 (__m128i a)
+ VPMOVUSQB xmm1/m16 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -3296,7 +3647,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtusepi16_epi8 (__m256i a)
+ VPMOVUSWB xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3321,7 +3675,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtusepi32_epi8 (__m256i a)
+ VPMOVUSDB xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3346,7 +3703,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtusepi64_epi8 (__m256i a)
+ VPMOVUSQB xmm1/m32 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3371,7 +3731,10 @@
To be added.
- To be added.
+
+ __m128d _mm_cvtepi64_pd (__m128i a)
+ VCVTQQ2PD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -3396,7 +3759,10 @@
To be added.
- To be added.
+
+ __m128d _mm_cvtepu32_pd (__m128i a)
+ VCVTUDQ2PD xmm1 {k1}{z}, xmm2/m64/m32bcst
+
To be added.
To be added.
@@ -3421,7 +3787,10 @@
To be added.
- To be added.
+
+ __m128d _mm_cvtepu64_pd (__m128i a)
+ VCVTUQQ2PD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -3446,7 +3815,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi32_epi16 (__m128i a)
+ VPMOVDW xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -3471,7 +3843,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi64_epi16 (__m128i a)
+ VPMOVQW xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -3496,7 +3871,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi32_epi16 (__m128i a)
+ VPMOVDW xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -3521,7 +3899,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi64_epi16 (__m128i a)
+ VPMOVQW xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -3546,7 +3927,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi32_epi16 (__m256i a)
+ VPMOVDW xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3571,7 +3955,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi64_epi16 (__m256i a)
+ VPMOVQW xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3596,7 +3983,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi32_epi16 (__m256i a)
+ VPMOVDW xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3621,7 +4011,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi64_epi16 (__m256i a)
+ VPMOVQW xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3646,7 +4039,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtsepi32_epi16 (__m128i a)
+ VPMOVSDW xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -3671,7 +4067,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtsepi64_epi16 (__m128i a)
+ VPMOVSQW xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -3696,7 +4095,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtsepi32_epi16 (__m256i a)
+ VPMOVSDW xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3721,7 +4123,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtsepi64_epi16 (__m256i a)
+ VPMOVSQW xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3746,7 +4151,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi64_epi32 (__m128i a)
+ VPMOVQD xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -3771,7 +4179,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi64_epi32 (__m128i a)
+ VPMOVQD xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -3796,7 +4207,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi64_epi32 (__m256i a)
+ VPMOVQD xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3821,7 +4235,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi64_epi32 (__m256i a)
+ VPMOVQD xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3846,7 +4263,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtsepi64_epi32 (__m128i a)
+ VPMOVSQD xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -3871,7 +4291,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtsepi64_epi32 (__m256i a)
+ VPMOVSQD xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -3896,7 +4319,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtpd_epi64 (__m128d a)
+ VCVTPD2QQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -3921,7 +4347,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtps_epi64 (__m128 a)
+ VCVTPS2QQ xmm1 {k1}{z}, xmm2/m64/m32bcst
+
To be added.
To be added.
@@ -3946,7 +4375,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvttpd_epi64 (__m128d a)
+ VCVTTPD2QQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -3971,7 +4403,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvttps_epi64 (__m128 a)
+ VCVTTPS2QQ xmm1 {k1}{z}, xmm2/m64/m32bcst
+
To be added.
To be added.
@@ -3996,7 +4431,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi16_epi8 (__m128i a)
+ VPMOVWB xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4021,7 +4459,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi32_epi8 (__m128i a)
+ VPMOVDB xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4046,7 +4487,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi64_epi8 (__m128i a)
+ VPMOVQB xmm1/m16 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4071,7 +4515,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi16_epi8 (__m128i a)
+ VPMOVWB xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4096,7 +4543,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi32_epi8 (__m128i a)
+ VPMOVDB xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4121,7 +4571,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi64_epi8 (__m128i a)
+ VPMOVQB xmm1/m16 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4146,7 +4599,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi16_epi8 (__m256i a)
+ VPMOVWB xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -4171,7 +4627,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi32_epi8 (__m256i a)
+ VPMOVDB xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -4196,7 +4655,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi64_epi8 (__m256i a)
+ VPMOVQB xmm1/m32 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -4221,7 +4683,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi16_epi8 (__m256i a)
+ VPMOVWB xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -4246,7 +4711,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi32_epi8 (__m256i a)
+ VPMOVDB xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -4271,7 +4739,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi64_epi8 (__m256i a)
+ VPMOVQB xmm1/m32 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -4296,7 +4767,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtsepi16_epi8 (__m128i a)
+ VPMOVSWB xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4321,7 +4795,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtsepi32_epi8 (__m128i a)
+ VPMOVSDB xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4346,7 +4823,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtsepi64_epi8 (__m128i a)
+ VPMOVSQB xmm1/m16 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4371,7 +4851,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtsepi16_epi8 (__m256i a)
+ VPMOVSWB xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -4396,7 +4879,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtsepi32_epi8 (__m256i a)
+ VPMOVSDB xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -4421,7 +4907,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtsepi64_epi8 (__m256i a)
+ VPMOVSQB xmm1/m32 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -4446,7 +4935,10 @@
To be added.
- To be added.
+
+ __m128 _mm_cvtepi64_ps (__m128i a)
+ VCVTQQ2PS xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -4471,7 +4963,10 @@
To be added.
- To be added.
+
+ __m128 _mm_cvtepu32_ps (__m128i a)
+ VCVTUDQ2PS xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -4496,7 +4991,10 @@
To be added.
- To be added.
+
+ __m128 _mm_cvtepu64_ps (__m128i a)
+ VCVTUQQ2PS xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -4521,7 +5019,10 @@
To be added.
- To be added.
+
+ __m128 _mm256_cvtepi64_ps (__m256i a)
+ VCVTQQ2PS xmm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -4546,7 +5047,10 @@
To be added.
- To be added.
+
+ __m128 _mm256_cvtepu64_ps (__m256i a)
+ VCVTUQQ2PS xmm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -4571,7 +5075,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi32_epi16 (__m128i a)
+ VPMOVDW xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4596,7 +5103,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi64_epi16 (__m128i a)
+ VPMOVQW xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4621,7 +5131,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi32_epi16 (__m128i a)
+ VPMOVDW xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4646,7 +5159,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi64_epi16 (__m128i a)
+ VPMOVQW xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4671,7 +5187,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi32_epi16 (__m256i a)
+ VPMOVDW xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -4696,7 +5215,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi64_epi16 (__m256i a)
+ VPMOVQW xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -4721,7 +5243,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi32_epi16 (__m256i a)
+ VPMOVDW xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -4746,7 +5271,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi64_epi16 (__m256i a)
+ VPMOVQW xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -4771,7 +5299,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtusepi32_epi16 (__m128i a)
+ VPMOVUSDW xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4796,7 +5327,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtusepi64_epi16 (__m128i a)
+ VPMOVUSQW xmm1/m32 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4821,7 +5355,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtusepi32_epi16 (__m256i a)
+ VPMOVUSDW xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -4846,7 +5383,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtusepi64_epi16 (__m256i a)
+ VPMOVUSQW xmm1/m64 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -4871,7 +5411,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtpd_epu32 (__m128d a)
+ VCVTPD2UDQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -4896,7 +5439,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi64_epi32 (__m128i a)
+ VPMOVQD xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4921,7 +5467,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtps_epu32 (__m128 a)
+ VCVTPS2UDQ xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -4946,7 +5495,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtepi64_epi32 (__m128i a)
+ VPMOVQD xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -4971,7 +5523,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtpd_epu32 (__m256d a)
+ VCVTPD2UDQ xmm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -4996,7 +5551,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi64_epi32 (__m256i a)
+ VPMOVQD xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -5021,7 +5579,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtepi64_epi32 (__m256i a)
+ VPMOVQD xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -5046,7 +5607,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtusepi64_epi32 (__m128i a)
+ VPMOVUSQD xmm1/m64 {k1}{z}, xmm2
+
To be added.
To be added.
@@ -5071,7 +5635,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvtusepi64_epi32 (__m256i a)
+ VPMOVUSQD xmm1/m128 {k1}{z}, ymm2
+
To be added.
To be added.
@@ -5096,7 +5663,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvttpd_epu32 (__m128d a)
+ VCVTTPD2UDQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -5121,7 +5691,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvttps_epu32 (__m128 a)
+ VCVTTPS2UDQ xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -5146,7 +5719,10 @@
To be added.
- To be added.
+
+ __m128i _mm256_cvttpd_epu32 (__m256d a)
+ VCVTTPD2UDQ xmm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -5171,7 +5747,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtpd_epu64 (__m128d a)
+ VCVTPD2UQQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -5196,7 +5775,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvtps_epu64 (__m128 a)
+ VCVTPS2UQQ xmm1 {k1}{z}, xmm2/m64/m32bcst
+
To be added.
To be added.
@@ -5221,7 +5803,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvttpd_epu64 (__m128d a)
+ VCVTTPD2UQQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -5246,7 +5831,10 @@
To be added.
- To be added.
+
+ __m128i _mm_cvttps_epu64 (__m128 a)
+ VCVTTPS2UQQ xmm1 {k1}{z}, xmm2/m64/m32bcst
+
To be added.
To be added.
@@ -5271,7 +5859,10 @@
To be added.
- To be added.
+
+ __m256d _mm256_cvtepu32_pd (__m128i a)
+ VCVTUDQ2PD ymm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -5296,7 +5887,10 @@
To be added.
- To be added.
+
+ __m256d _mm256_cvtepi64_pd (__m256i a)
+ VCVTQQ2PD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -5321,7 +5915,10 @@
To be added.
- To be added.
+
+ __m256d _mm256_cvtepu64_pd (__m256i a)
+ VCVTUQQ2PD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -5346,7 +5943,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_cvtps_epi64 (__m128 a)
+ VCVTPS2QQ ymm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -5371,7 +5971,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_cvtpd_epi64 (__m256d a)
+ VCVTPD2QQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -5396,7 +5999,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_cvttps_epi64 (__m128 a)
+ VCVTTPS2QQ ymm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -5421,7 +6027,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_cvttpd_epi64 (__m256d a)
+ VCVTTPD2QQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -5446,7 +6055,10 @@
To be added.
- To be added.
+
+ __m256 _mm256_cvtepu32_ps (__m256i a)
+ VCVTUDQ2PS ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -5471,7 +6083,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_cvtps_epu32 (__m256 a)
+ VCVTPS2UDQ ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -5496,7 +6111,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_cvttps_epu32 (__m256 a)
+ VCVTTPS2UDQ ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -5521,7 +6139,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_cvtps_epu64 (__m128 a)
+ VCVTPS2UQQ ymm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -5546,7 +6167,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_cvtpd_epu64 (__m256d a)
+ VCVTPD2UQQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -5571,7 +6195,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_cvttps_epu64 (__m128 a)
+ VCVTTPS2UQQ ymm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -5596,7 +6223,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_cvttpd_epu64 (__m256d a)
+ VCVTTPD2UQQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -5621,7 +6251,10 @@
To be added.
- To be added.
+
+ __m128i _mm_conflict_epi32 (__m128i a)
+ VPCONFLICTD xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -5646,7 +6279,10 @@
To be added.
- To be added.
+
+ __m128i _mm_conflict_epi64 (__m128i a)
+ VPCONFLICTQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -5671,7 +6307,10 @@
To be added.
- To be added.
+
+ __m128i _mm_conflict_epi32 (__m128i a)
+ VPCONFLICTD xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -5696,7 +6335,10 @@
To be added.
- To be added.
+
+ __m128i _mm_conflict_epi64 (__m128i a)
+ VPCONFLICTQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -5721,7 +6363,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_conflict_epi32 (__m256i a)
+ VPCONFLICTD ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -5746,7 +6391,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_conflict_epi64 (__m256i a)
+ VPCONFLICTQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -5771,7 +6419,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_conflict_epi32 (__m256i a)
+ VPCONFLICTD ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -5796,7 +6447,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_conflict_epi64 (__m256i a)
+ VPCONFLICTQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -5832,7 +6486,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_div_round_sd (__m128d a, __m128d b, int rounding)
+ VDIVSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -5868,7 +6525,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_div_round_ss (__m128 a, __m128 b, int rounding)
+ VDIVSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -5906,7 +6566,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_fixupimm_pd(__m128d a, __m128d b, __m128i tbl, int imm);
+ VFIXUPIMMPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -5944,7 +6607,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_fixupimm_ps(__m128 a, __m128 b, __m128i tbl, int imm);
+ VFIXUPIMMPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -5982,7 +6648,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256d _mm256_fixupimm_pd(__m256d a, __m256d b, __m256i tbl, int imm);
+ VFIXUPIMMPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -6020,7 +6689,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256 _mm256_fixupimm_ps(__m256 a, __m256 b, __m256i tbl, int imm);
+ VFIXUPIMMPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -6058,7 +6730,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_fixupimm_sd(__m128d a, __m128d b, __m128i tbl, int imm);
+ VFIXUPIMMSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
+
To be added.
To be added.
@@ -6096,7 +6771,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_fixupimm_ss(__m128 a, __m128 b, __m128i tbl, int imm);
+ VFIXUPIMMSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8
+
To be added.
To be added.
@@ -6134,7 +6812,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_fnmadd_round_sd (__m128d a, __m128d b, __m128d c, int r)
+ VFNMADDSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -6172,7 +6853,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_fnmadd_round_ss (__m128 a, __m128 b, __m128 c, int r)
+ VFNMADDSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -6210,7 +6894,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_fmadd_round_sd (__m128d a, __m128d b, __m128d c, int r)
+ VFMADDSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -6248,7 +6935,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_fmadd_round_ss (__m128 a, __m128 b, __m128 c, int r)
+ VFMADDSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -6286,7 +6976,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_fnmsub_round_sd (__m128d a, __m128d b, __m128d c, int r)
+ VFNMSUBSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -6324,7 +7017,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_fnmsub_round_ss (__m128 a, __m128 b, __m128 c, int r)
+ VFNMSUBSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -6362,7 +7058,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_fmsub_round_sd (__m128d a, __m128d b, __m128d c, int r)
+ VFMSUBSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -6400,7 +7099,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_fmsub_round_ss (__m128 a, __m128 b, __m128 c, int r)
+ VFMSUBSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -6425,7 +7127,10 @@
To be added.
- To be added.
+
+ __m128d _mm_getexp_pd (__m128d a)
+ VGETEXPPD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -6450,7 +7155,10 @@
To be added.
- To be added.
+
+ __m128 _mm_getexp_ps (__m128 a)
+ VGETEXPPS xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -6475,7 +7183,10 @@
To be added.
- To be added.
+
+ __m256d _mm256_getexp_pd (__m256d a)
+ VGETEXPPD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -6500,7 +7211,10 @@
To be added.
- To be added.
+
+ __m256 _mm256_getexp_ps (__m256 a)
+ VGETEXPPS ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -6525,7 +7239,10 @@
To be added.
- To be added.
+
+ __m128d _mm_getexp_sd (__m128d a)
+ VGETEXPSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}
+
To be added.
To be added.
@@ -6550,7 +7267,10 @@
To be added.
- To be added.
+
+ __m128 _mm_getexp_ss (__m128 a)
+ VGETEXPSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}
+
To be added.
To be added.
@@ -6577,7 +7297,11 @@
To be added.
To be added.
- To be added.
+
+ __m128d _mm_getexp_sd (__m128d a, __m128d b)
+ VGETEXPSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -6604,7 +7328,11 @@
To be added.
To be added.
- To be added.
+
+ __m128 _mm_getexp_ss (__m128 a, __m128 b)
+ VGETEXPSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -6638,7 +7366,10 @@
To be added.
To be added.
- To be added.
+
+ __m128d _mm_getmant_pd (__m128d a)
+ VGETMANTPD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -6672,7 +7403,10 @@
To be added.
To be added.
- To be added.
+
+ __m128 _mm_getmant_ps (__m128 a)
+ VGETMANTPS xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -6706,7 +7440,10 @@
To be added.
To be added.
- To be added.
+
+ __m256d _mm256_getmant_pd (__m256d a)
+ VGETMANTPD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -6740,7 +7477,10 @@
To be added.
To be added.
- To be added.
+
+ __m256 _mm256_getmant_ps (__m256 a)
+ VGETMANTPS ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -6774,7 +7514,10 @@
To be added.
To be added.
- To be added.
+
+ __m128d _mm_getmant_sd (__m128d a)
+ VGETMANTSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}
+
To be added.
To be added.
@@ -6808,7 +7551,10 @@
To be added.
To be added.
- To be added.
+
+ __m128 _mm_getmant_ss (__m128 a)
+ VGETMANTSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}
+
To be added.
To be added.
@@ -6844,7 +7590,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_getmant_sd (__m128d a, __m128d b)
+ VGETMANTSD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -6880,7 +7630,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_getmant_ss (__m128 a, __m128 b)
+ VGETMANTSS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -6901,9 +7655,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
@@ -6926,7 +7681,10 @@
To be added.
- To be added.
+
+ __m128i _mm_lzcnt_epi32 (__m128i a)
+ VPLZCNTD xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -6951,7 +7709,10 @@
To be added.
- To be added.
+
+ __m128i _mm_lzcnt_epi64 (__m128i a)
+ VPLZCNTQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -6976,7 +7737,10 @@
To be added.
- To be added.
+
+ __m128i _mm_lzcnt_epi32 (__m128i a)
+ VPLZCNTD xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -7001,7 +7765,10 @@
To be added.
- To be added.
+
+ __m128i _mm_lzcnt_epi64 (__m128i a)
+ VPLZCNTQ xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -7026,7 +7793,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_lzcnt_epi32 (__m256i a)
+ VPLZCNTD ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -7051,7 +7821,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_lzcnt_epi64 (__m256i a)
+ VPLZCNTQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -7076,7 +7849,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_lzcnt_epi32 (__m256i a)
+ VPLZCNTD ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -7101,7 +7877,10 @@
To be added.
- To be added.
+
+ __m256i _mm256_lzcnt_epi64 (__m256i a)
+ VPLZCNTQ ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -7128,7 +7907,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_max_epi64 (__m128i a, __m128i b)
+ VPMAXSQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -7155,7 +7937,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_max_epu64 (__m128i a, __m128i b)
+ VPMAXUQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -7182,7 +7967,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_max_epi64 (__m256i a, __m256i b)
+ VPMAXSQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -7209,7 +7997,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_max_epu64 (__m256i a, __m256i b)
+ VPMAXUQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -7236,7 +8027,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_min_epi64 (__m128i a, __m128i b)
+ VPMINSQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -7263,7 +8057,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_min_epu64 (__m128i a, __m128i b)
+ VPMINUQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -7290,7 +8087,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_min_epi64 (__m256i a, __m256i b)
+ VPMINSQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -7317,7 +8117,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_min_epu64 (__m256i a, __m256i b)
+ VPMINUQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -7344,7 +8147,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_mullo_epi64 (__m128i a, __m128i b)
+ VPMULLQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -7371,7 +8177,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_mullo_epi64 (__m128i a, __m128i b)
+ VPMULLQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -7398,7 +8207,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_mullo_epi64 (__m256i a, __m256i b)
+ VPMULLQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -7425,7 +8237,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_mullo_epi64 (__m256i a, __m256i b)
+ VPMULLQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -7461,7 +8276,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_mul_round_sd (__m128d a, __m128d b, int rounding)
+ VMULSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -7497,7 +8315,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_mul_round_ss (__m128 a, __m128 b, int rounding)
+ VMULSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -7524,7 +8345,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_multishift_epi64_epi8(__m128i a, __m128i b)
+ VPMULTISHIFTQB xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -7551,7 +8375,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_multishift_epi64_epi8(__m128i a, __m128i b)
+ VPMULTISHIFTQB xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -7578,7 +8405,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_multishift_epi64_epi8(__m256i a, __m256i b)
+ VPMULTISHIFTQB ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -7605,7 +8435,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_multishift_epi64_epi8(__m256i a, __m256i b)
+ VPMULTISHIFTQB ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -7632,7 +8465,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_permutevar16x16_epi16 (__m256i a, __m256i b)
+ VPERMW ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -7659,7 +8495,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_permutevar16x16_epi16 (__m256i a, __m256i b)
+ VPERMW ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -7688,7 +8527,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_permutex2var_epi16 (__m256i a, __m256i idx, __m256i b)
+ VPERMI2W ymm1 {k1}{z}, ymm2, ymm3/m256
+ VPERMT2W ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -7717,7 +8560,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_permutex2var_epi16 (__m256i a, __m256i idx, __m256i b)
+ VPERMI2W ymm1 {k1}{z}, ymm2, ymm3/m256
+ VPERMT2W ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -7744,7 +8591,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_permutevar64x8_epi8 (__m128i a, __m128i b)
+ VPERMB xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -7771,7 +8621,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_permutevar64x8_epi8 (__m128i a, __m128i b)
+ VPERMB xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -7800,7 +8653,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_permutex2var_epi8 (__m128i a, __m128i idx, __m128i b)
+ VPERMI2B xmm1 {k1}{z}, xmm2, xmm3/m128
+ VPERMT2B xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -7829,7 +8686,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_permutex2var_epi8 (__m128i a, __m128i idx, __m128i b)
+ VPERMI2B xmm1 {k1}{z}, xmm2, xmm3/m128
+ VPERMT2B xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -7858,7 +8719,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_permutex2var_pd (__m128d a, __m128i idx, __m128i b)
+ VPERMI2PD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+ VPERMT2PD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -7887,7 +8752,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_permutex2var_epi64 (__m128i a, __m128i idx, __m128i b)
+ VPERMI2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+ VPERMT2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -7916,7 +8785,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_permutex2var_epi64 (__m128i a, __m128i idx, __m128i b)
+ VPERMI2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+ VPERMT2Q xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -7943,7 +8816,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_permutevar64x8_epi8 (__m256i a, __m256i b)
+ VPERMB ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -7970,7 +8846,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_permutevar64x8_epi8 (__m256i a, __m256i b)
+ VPERMB ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -7999,7 +8878,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_permutex2var_epi8 (__m256i a, __m256i idx, __m256i b)
+ VPERMI2B ymm1 {k1}{z}, ymm2, ymm3/m256
+ VPERMT2B ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -8028,7 +8911,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_permutex2var_epi8 (__m256i a, __m256i idx, __m256i b)
+ VPERMI2B ymm1 {k1}{z}, ymm2, ymm3/m256
+ VPERMT2B ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -8057,7 +8944,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_permutex2var_epi32 (__m128i a, __m128i idx, __m128i b)
+ VPERMI2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+ VPERMT2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
To be added.
To be added.
@@ -8086,7 +8977,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_permutex2var_ps (__m128 a, __m128i idx, __m128i b)
+ VPERMI2PS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+ VPERMT2PS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
To be added.
To be added.
@@ -8115,7 +9010,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_permutex2var_epi32 (__m128i a, __m128i idx, __m128i b)
+ VPERMI2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+ VPERMT2D xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
To be added.
To be added.
@@ -8142,7 +9041,10 @@
To be added.
To be added.
- To be added.
+
+ __m256d _mm256_permute4x64_pd (__m256d a, __m256i b)
+ VPERMPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -8169,7 +9071,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_permute4x64_epi64 (__m256i a, __m256i b)
+ VPERMQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -8196,7 +9101,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_permute4x64_epi64 (__m256i a, __m256i b)
+ VPERMQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -8225,7 +9133,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256d _mm256_permutex2var_pd (__m256d a, __m256i idx, __m256i b)
+ VPERMI2PD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+ VPERMT2PD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -8254,7 +9166,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_permutex2var_epi64 (__m256i a, __m256i idx, __m256i b)
+ VPERMI2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+ VPERMT2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -8283,7 +9199,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_permutex2var_epi64 (__m256i a, __m256i idx, __m256i b)
+ VPERMI2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+ VPERMT2Q ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -8310,7 +9230,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_permutevar8x16_epi16 (__m128i a, __m128i b)
+ VPERMW xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -8337,7 +9260,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_permutevar8x16_epi16 (__m128i a, __m128i b)
+ VPERMW xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -8366,7 +9292,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_permutex2var_epi16 (__m128i a, __m128i idx, __m128i b)
+ VPERMI2W xmm1 {k1}{z}, xmm2, xmm3/m128
+ VPERMT2W xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -8395,7 +9325,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_permutex2var_epi16 (__m128i a, __m128i idx, __m128i b)
+ VPERMI2W xmm1 {k1}{z}, xmm2, xmm3/m128
+ VPERMT2W xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -8424,7 +9358,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_permutex2var_epi32 (__m256i a, __m256i idx, __m256i b)
+ VPERMI2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+ VPERMT2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
To be added.
To be added.
@@ -8453,7 +9391,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256 _mm256_permutex2var_ps (__m256 a, __m256i idx, __m256i b)
+ VPERMI2PS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+ VPERMT2PS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
To be added.
To be added.
@@ -8482,7 +9424,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_permutex2var_epi32 (__m256i a, __m256i idx, __m256i b)
+ VPERMI2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+ VPERMT2D ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
To be added.
To be added.
@@ -8518,7 +9464,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_range_pd(__m128d a, __m128d b, int imm);
+ VRANGEPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -8554,7 +9503,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_range_ps(__m128 a, __m128 b, int imm);
+ VRANGEPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -8590,7 +9542,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256d _mm256_range_pd(__m256d a, __m256d b, int imm);
+ VRANGEPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -8626,7 +9581,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256 _mm256_range_ps(__m256 a, __m256 b, int imm);
+ VRANGEPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -8662,7 +9620,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_range_sd(__m128d a, __m128d b, int imm);
+ VRANGESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
+
To be added.
To be added.
@@ -8698,7 +9659,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_range_ss(__m128 a, __m128 b, int imm);
+ VRANGESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8
+
To be added.
To be added.
@@ -8723,7 +9687,10 @@
To be added.
- To be added.
+
+ __m128d _mm_rcp14_pd (__m128d a)
+ VRCP14PD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -8748,7 +9715,10 @@
To be added.
- To be added.
+
+ __m128 _mm_rcp14_ps (__m128 a)
+ VRCP14PS xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -8773,7 +9743,10 @@
To be added.
- To be added.
+
+ __m256d _mm256_rcp14_pd (__m256d a)
+ VRCP14PD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -8798,7 +9771,10 @@
To be added.
- To be added.
+
+ __m256 _mm256_rcp14_ps (__m256 a)
+ VRCP14PS ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -8823,7 +9799,10 @@
To be added.
- To be added.
+
+ __m128d _mm_rcp14_sd (__m128d a)
+ VRCP14SD xmm1 {k1}{z}, xmm2, xmm3/m64
+
To be added.
To be added.
@@ -8848,7 +9827,10 @@
To be added.
- To be added.
+
+ __m128 _mm_rcp14_ss (__m128 a)
+ VRCP14SS xmm1 {k1}{z}, xmm2, xmm3/m32
+
To be added.
To be added.
@@ -8875,7 +9857,11 @@
To be added.
To be added.
- To be added.
+
+ __m128d _mm_rcp14_sd (__m128d a, __m128d b)
+ VRCP14SD xmm1 {k1}{z}, xmm2, xmm3/m64
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -8902,7 +9888,11 @@
To be added.
To be added.
- To be added.
+
+ __m128 _mm_rcp14_ss (__m128 a, __m128 b)
+ VRCP14SS xmm1 {k1}{z}, xmm2, xmm3/m32
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -8927,7 +9917,10 @@
To be added.
- To be added.
+
+ __m128d _mm_rsqrt14_pd (__m128d a)
+ VRSQRT14PD xmm1 {k1}{z}, xmm2/m128/m64bcst
+
To be added.
To be added.
@@ -8952,7 +9945,10 @@
To be added.
- To be added.
+
+ __m128 _mm_rsqrt14_ps (__m128 a)
+ VRSQRT14PS xmm1 {k1}{z}, xmm2/m128/m32bcst
+
To be added.
To be added.
@@ -8977,7 +9973,10 @@
To be added.
- To be added.
+
+ __m256d _mm256_rsqrt14_pd (__m256d a)
+ VRSQRT14PD ymm1 {k1}{z}, ymm2/m256/m64bcst
+
To be added.
To be added.
@@ -9002,7 +10001,10 @@
To be added.
- To be added.
+
+ __m256 _mm256_rsqrt14_ps (__m256 a)
+ VRSQRT14PS ymm1 {k1}{z}, ymm2/m256/m32bcst
+
To be added.
To be added.
@@ -9027,7 +10029,10 @@
To be added.
- To be added.
+
+ __m128d _mm_rsqrt14_sd (__m128d a)
+ VRSQRT14SD xmm1 {k1}{z}, xmm2, xmm3/m64
+
To be added.
To be added.
@@ -9052,7 +10057,10 @@
To be added.
- To be added.
+
+ __m128 _mm_rsqrt14_ss (__m128 a)
+ VRSQRT14SS xmm1 {k1}{z}, xmm2, xmm3/m32
+
To be added.
To be added.
@@ -9079,7 +10087,11 @@
To be added.
To be added.
- To be added.
+
+ __m128d _mm_rsqrt14_sd (__m128d a, __m128d b)
+ VRSQRT14SD xmm1 {k1}{z}, xmm2, xmm3/m64
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -9106,7 +10118,11 @@
To be added.
To be added.
- To be added.
+
+ __m128 _mm_rsqrt14_ss (__m128 a, __m128 b)
+ VRSQRT14SS xmm1 {k1}{z}, xmm2, xmm3/m32
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -9140,7 +10156,10 @@
To be added.
To be added.
- To be added.
+
+ __m128d _mm_reduce_pd(__m128d a, int imm);
+ VREDUCEPD xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -9174,7 +10193,10 @@
To be added.
To be added.
- To be added.
+
+ __m128 _mm_reduce_ps(__m128 a, int imm);
+ VREDUCEPS xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -9208,7 +10230,10 @@
To be added.
To be added.
- To be added.
+
+ __m256d _mm256_reduce_pd(__m256d a, int imm);
+ VREDUCEPD ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -9242,7 +10267,10 @@
To be added.
To be added.
- To be added.
+
+ __m256 _mm256_reduce_ps(__m256 a, int imm);
+ VREDUCEPS ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -9276,7 +10304,10 @@
To be added.
To be added.
- To be added.
+
+ __m128d _mm_reduce_sd(__m128d a, int imm);
+ VREDUCESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
+
To be added.
To be added.
@@ -9310,7 +10341,10 @@
To be added.
To be added.
- To be added.
+
+ __m128 _mm_reduce_ss(__m128 a, int imm);
+ VREDUCESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8
+
To be added.
To be added.
@@ -9346,7 +10380,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_reduce_sd(__m128d a, __m128d b, int imm);
+ VREDUCESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -9382,7 +10420,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_reduce_ss(__m128 a, __m128 b, int imm);
+ VREDUCESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -9416,7 +10458,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_rol_epi32 (__m128i a, int imm8)
+ VPROLD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -9450,7 +10495,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_rol_epi64 (__m128i a, int imm8)
+ VPROLQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -9484,7 +10532,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_rol_epi32 (__m128i a, int imm8)
+ VPROLD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -9518,7 +10569,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_rol_epi64 (__m128i a, int imm8)
+ VPROLQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -9552,7 +10606,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_rol_epi32 (__m256i a, int imm8)
+ VPROLD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -9586,7 +10643,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_rol_epi64 (__m256i a, int imm8)
+ VPROLQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -9620,7 +10680,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_rol_epi32 (__m256i a, int imm8)
+ VPROLD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -9654,7 +10717,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_rol_epi64 (__m256i a, int imm8)
+ VPROLQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -9681,7 +10747,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_rolv_epi32 (__m128i a, __m128i b)
+ VPROLDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
To be added.
To be added.
@@ -9708,7 +10777,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_rolv_epi64 (__m128i a, __m128i b)
+ VPROLQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -9735,7 +10807,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_rolv_epi32 (__m128i a, __m128i b)
+ VPROLDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
To be added.
To be added.
@@ -9762,7 +10837,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_rolv_epi64 (__m128i a, __m128i b)
+ VPROLQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -9789,7 +10867,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_rolv_epi32 (__m256i a, __m256i b)
+ VPROLDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
To be added.
To be added.
@@ -9816,7 +10897,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_rolv_epi64 (__m256i a, __m256i b)
+ VPROLQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -9843,7 +10927,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_rolv_epi32 (__m256i a, __m256i b)
+ VPROLDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
To be added.
To be added.
@@ -9870,7 +10957,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_rolv_epi64 (__m256i a, __m256i b)
+ VPROLQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -9904,7 +10994,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_ror_epi32 (__m128i a, int imm8)
+ VPRORD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -9938,7 +11031,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_ror_epi64 (__m128i a, int imm8)
+ VPRORQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -9972,7 +11068,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_ror_epi32 (__m128i a, int imm8)
+ VPRORD xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -10006,7 +11105,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_ror_epi64 (__m128i a, int imm8)
+ VPRORQ xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -10040,7 +11142,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_ror_epi32 (__m256i a, int imm8)
+ VPRORD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -10074,7 +11179,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_ror_epi64 (__m256i a, int imm8)
+ VPRORQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -10108,7 +11216,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_ror_epi32 (__m256i a, int imm8)
+ VPRORD ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -10142,7 +11253,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_ror_epi64 (__m256i a, int imm8)
+ VPRORQ ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -10169,7 +11283,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_rorv_epi32 (__m128i a, __m128i b)
+ VPRORDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
To be added.
To be added.
@@ -10196,7 +11313,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_rorv_epi64 (__m128i a, __m128i b)
+ VPRORQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -10223,7 +11343,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_rorv_epi32 (__m128i a, __m128i b)
+ VPRORDV xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
To be added.
To be added.
@@ -10250,7 +11373,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_rorv_epi64 (__m128i a, __m128i b)
+ VPRORQV xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -10277,7 +11403,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_rorv_epi32 (__m256i a, __m256i b)
+ VPRORDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
To be added.
To be added.
@@ -10304,7 +11433,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_rorv_epi64 (__m256i a, __m256i b)
+ VPRORQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -10331,7 +11463,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_rorv_epi32 (__m256i a, __m256i b)
+ VPRORDV ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
To be added.
To be added.
@@ -10358,7 +11493,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_rorv_epi64 (__m256i a, __m256i b)
+ VPRORQV ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -10392,7 +11530,10 @@
To be added.
To be added.
- To be added.
+
+ __m128d _mm_roundscale_pd (__m128d a, int imm)
+ VRNDSCALEPD xmm1 {k1}{z}, xmm2/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -10426,7 +11567,10 @@
To be added.
To be added.
- To be added.
+
+ __m128 _mm_roundscale_ps (__m128 a, int imm)
+ VRNDSCALEPS xmm1 {k1}{z}, xmm2/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -10460,7 +11604,10 @@
To be added.
To be added.
- To be added.
+
+ __m256d _mm256_roundscale_pd (__m256d a, int imm)
+ VRNDSCALEPD ymm1 {k1}{z}, ymm2/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -10494,7 +11641,10 @@
To be added.
To be added.
- To be added.
+
+ __m256 _mm256_roundscale_ps (__m256 a, int imm)
+ VRNDSCALEPS ymm1 {k1}{z}, ymm2/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -10528,7 +11678,10 @@
To be added.
To be added.
- To be added.
+
+ __m128d _mm_roundscale_sd (__m128d a, int imm)
+ VRNDSCALESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
+
To be added.
To be added.
@@ -10562,7 +11715,10 @@
To be added.
To be added.
- To be added.
+
+ __m128 _mm_roundscale_ss (__m128 a, int imm)
+ VRNDSCALESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8
+
To be added.
To be added.
@@ -10598,7 +11754,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_roundscale_sd (__m128d a, __m128d b, int imm)
+ VRNDSCALESD xmm1 {k1}{z}, xmm2, xmm3/m64{sae}, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -10634,7 +11794,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_roundscale_ss (__m128 a, __m128 b, int imm)
+ VRNDSCALESS xmm1 {k1}{z}, xmm2, xmm3/m32{sae}, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
+
To be added.
To be added.
@@ -10661,7 +11825,10 @@
To be added.
To be added.
- To be added.
+
+ __m128d _mm_scalef_pd (__m128d a, __m128d b)
+ VSCALEFPD xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -10688,7 +11855,10 @@
To be added.
To be added.
- To be added.
+
+ __m128 _mm_scalef_ps (__m128 a, __m128 b)
+ VSCALEFPS xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst
+
To be added.
To be added.
@@ -10715,7 +11885,10 @@
To be added.
To be added.
- To be added.
+
+ __m256d _mm256_scalef_pd (__m256d a, __m256d b)
+ VSCALEFPD ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -10742,7 +11915,10 @@
To be added.
To be added.
- To be added.
+
+ __m256 _mm256_scalef_ps (__m256 a, __m256 b)
+ VSCALEFPS ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst
+
To be added.
To be added.
@@ -10769,7 +11945,10 @@
To be added.
To be added.
- To be added.
+
+ __m128d _mm_scalef_sd (__m128d a, __m128d b)
+ VSCALEFSD xmm1 {k1}{z}, xmm2, xmm3/m64{er}
+
To be added.
To be added.
@@ -10796,7 +11975,10 @@
To be added.
To be added.
- To be added.
+
+ __m128 _mm_scalef_ss (__m128 a, __m128 b)
+ VSCALEFSS xmm1 {k1}{z}, xmm2, xmm3/m32{er}
+
To be added.
To be added.
@@ -10832,7 +12014,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_scalef_round_sd (__m128d a, __m128d b)
+ VSCALEFSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -10868,7 +12053,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_scalef_round_ss (__m128 a, __m128 b)
+ VSCALEFSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -10895,7 +12083,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_sllv_epi16 (__m128i a, __m128i count)
+ VPSLLVW xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -10922,7 +12113,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_sllv_epi16 (__m128i a, __m128i count)
+ VPSLLVW xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -10949,7 +12143,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_sllv_epi16 (__m256i a, __m256i count)
+ VPSLLVW ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -10976,7 +12173,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_sllv_epi16 (__m256i a, __m256i count)
+ VPSLLVW ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -11010,7 +12210,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_srai_epi64 (__m128i a, int imm8)
+ VPSRAQ xmm1 {k1}{z}, xmm2, imm8
+
To be added.
To be added.
@@ -11037,7 +12240,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_sra_epi64 (__m128i a, __m128i count)
+ VPSRAQ xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -11071,7 +12277,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_srai_epi64 (__m256i a, int imm8)
+ VPSRAQ ymm1 {k1}{z}, ymm2, imm8
+
To be added.
To be added.
@@ -11098,7 +12307,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_sra_epi64 (__m256i a, __m128i count)
+ VPSRAQ ymm1 {k1}{z}, ymm2, xmm3/m128
+
To be added.
To be added.
@@ -11125,7 +12337,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_srav_epi16 (__m128i a, __m128i count)
+ VPSRAVW xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -11152,7 +12367,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_srav_epi64 (__m128i a, __m128i count)
+ VPSRAVQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -11179,7 +12397,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_srav_epi16 (__m256i a, __m256i count)
+ VPSRAVW ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -11206,7 +12427,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_srav_epi64 (__m256i a, __m256i count)
+ VPSRAVQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -11233,7 +12457,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_srlv_epi16 (__m128i a, __m128i count)
+ VPSRLVW xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -11260,7 +12487,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_srlv_epi16 (__m128i a, __m128i count)
+ VPSRLVW xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -11287,7 +12517,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_srlv_epi16 (__m256i a, __m256i count)
+ VPSRLVW ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -11314,7 +12547,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_srlv_epi16 (__m256i a, __m256i count)
+ VPSRLVW ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -11350,7 +12586,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256d _mm256_shuffle_f64x2 (__m256d a, __m256d b, const int imm8)
+ VSHUFF64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -11386,7 +12625,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_shuffle_i32x4 (__m256i a, __m256i b, const int imm8)
+ VSHUFI32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -11422,7 +12664,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_shuffle_i64x2 (__m256i a, __m256i b, const int imm8)
+ VSHUFI64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -11458,7 +12703,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256 _mm256_shuffle_f32x4 (__m256 a, __m256 b, const int imm8)
+ VSHUFF32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -11494,7 +12742,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_shuffle_i32x4 (__m256i a, __m256i b, const int imm8)
+ VSHUFI32x4 ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -11530,7 +12781,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_shuffle_i64x2 (__m256i a, __m256i b, const int imm8)
+ VSHUFI64x2 ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -11566,7 +12820,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_sqrt_round_sd (__m128d a, __m128d b, int rounding)
+ VSQRTSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -11602,7 +12859,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_sqrt_round_ss (__m128 a, __m128 b, int rounding)
+ VSQRTSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -11638,7 +12898,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_sub_round_sd (__m128d a, __m128d b, int rounding)
+ VSUBSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -11674,7 +12937,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_sub_round_ss (__m128 a, __m128 b, int rounding)
+ VSUBSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -11710,7 +12976,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_dbsad_epu8 (__m128i a, __m128i b, int imm8)
+ VDBPSADBW xmm1 {k1}{z}, xmm2, xmm3/m128
+
To be added.
To be added.
@@ -11746,7 +13015,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_dbsad_epu8 (__m256i a, __m256i b, int imm8)
+ VDBPSADBW ymm1 {k1}{z}, ymm2, ymm3/m256
+
To be added.
To be added.
@@ -11784,7 +13056,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, byte imm)
+ VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -11822,7 +13098,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_ternarylogic_pd (__m128d a, __m128d b, __m128d c, int imm)
+ VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -11860,7 +13140,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, short imm)
+ VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -11898,7 +13182,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_ternarylogic_epi32 (__m128i a, __m128i b, __m128i c, int imm)
+ VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -11936,7 +13223,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_ternarylogic_epi64 (__m128i a, __m128i b, __m128i c, int imm)
+ VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -11974,7 +13264,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, byte imm)
+ VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -12012,7 +13306,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_ternarylogic_ps (__m128 a, __m128 b, __m128 c, int imm)
+ VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -12050,7 +13348,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_ternarylogic_si128 (__m128i a, __m128i b, __m128i c, short imm)
+ VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -12088,7 +13390,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_ternarylogic_epi32 (__m128i a, __m128i b, __m128i c, int imm)
+ VPTERNLOGD xmm1 {k1}{z}, xmm2, xmm3/m128/m32bcst, imm8
+
To be added.
To be added.
@@ -12126,7 +13431,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128i _mm_ternarylogic_epi64 (__m128i a, __m128i b, __m128i c, int imm)
+ VPTERNLOGQ xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst, imm8
+
To be added.
To be added.
@@ -12164,7 +13472,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, byte imm)
+ VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -12202,7 +13514,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256d _mm256_ternarylogic_pd (__m256d a, __m256d b, __m256d c, int imm)
+ VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -12240,7 +13556,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, short imm)
+ VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -12278,7 +13598,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_ternarylogic_epi32 (__m256i a, __m256i b, __m256i c, int imm)
+ VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -12316,7 +13639,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_ternarylogic_epi64 (__m256i a, __m256i b, __m256i c, int imm)
+ VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
@@ -12354,7 +13680,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, byte imm)
+ VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -12392,7 +13722,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256 _mm256_ternarylogic_ps (__m256 a, __m256 b, __m256 c, int imm)
+ VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -12430,7 +13764,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_ternarylogic_si256 (__m256i a, __m256i b, __m256i c, short imm)
+ VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256, imm8
+ The above native signature does not exist. We provide this additional overload for consistency with the other bitwise APIs.
+
To be added.
To be added.
@@ -12468,7 +13806,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_ternarylogic_epi32 (__m256i a, __m256i b, __m256i c, int imm)
+ VPTERNLOGD ymm1 {k1}{z}, ymm2, ymm3/m256/m32bcst, imm8
+
To be added.
To be added.
@@ -12506,7 +13847,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_ternarylogic_epi64 (__m256i a, __m256i b, __m256i c, int imm)
+ VPTERNLOGQ ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst, imm8
+
To be added.
To be added.
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx2+X64.xml b/xml/System.Runtime.Intrinsics.X86/Avx2+X64.xml
index b0b51775253..a9c997c97a3 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx2+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx2+X64.xml
@@ -18,7 +18,7 @@
- To be added.
+ Provides access to the x86 AVX2 hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -42,9 +42,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx2.xml b/xml/System.Runtime.Intrinsics.X86/Avx2.xml
index a3cfcbac83f..cde346aa7f2 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx2.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx2.xml
@@ -8480,9 +8480,10 @@ The native signature doesn't exist. This additional overload is provided for com
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512BW+VL.xml b/xml/System.Runtime.Intrinsics.X86/Avx512BW+VL.xml
index 3a17dc673e9..fcda880b950 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512BW+VL.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512BW+VL.xml
@@ -15,7 +15,7 @@
- To be added.
+ Provides access to the x86 AVX512BW+VL hardware instructions via intrinsics.
To be added.
@@ -1500,9 +1500,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512BW+X64.xml b/xml/System.Runtime.Intrinsics.X86/Avx512BW+X64.xml
index a8694c33a9c..076ace4d3c2 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512BW+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512BW+X64.xml
@@ -15,7 +15,7 @@
- To be added.
+ Provides access to the x86 AVX512BW hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -36,9 +36,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512BW.xml b/xml/System.Runtime.Intrinsics.X86/Avx512BW.xml
index 332afa4be06..2c3362f91e5 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512BW.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512BW.xml
@@ -1772,9 +1772,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512CD+VL.xml b/xml/System.Runtime.Intrinsics.X86/Avx512CD+VL.xml
index a9d7759c285..d6d0ac93434 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512CD+VL.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512CD+VL.xml
@@ -15,7 +15,7 @@
- To be added.
+ Provides access to the x86 AVX512CD+VL hardware instructions via intrinsics.
To be added.
@@ -268,9 +268,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512CD+X64.xml b/xml/System.Runtime.Intrinsics.X86/Avx512CD+X64.xml
index b3fa7bddc5d..8887d3faece 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512CD+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512CD+X64.xml
@@ -15,7 +15,7 @@
- To be added.
+ Provides access to the x86 AVX512CD hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -36,9 +36,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512CD.xml b/xml/System.Runtime.Intrinsics.X86/Avx512CD.xml
index d53708f8e01..a254b4a0ab9 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512CD.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512CD.xml
@@ -158,9 +158,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512DQ+VL.xml b/xml/System.Runtime.Intrinsics.X86/Avx512DQ+VL.xml
index a4f215e3883..dc050f3d640 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512DQ+VL.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512DQ+VL.xml
@@ -15,7 +15,7 @@
- To be added.
+ Provides access to the x86 AVX512DQ+VL hardware instructions via intrinsics.
To be added.
@@ -877,9 +877,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512DQ+X64.xml b/xml/System.Runtime.Intrinsics.X86/Avx512DQ+X64.xml
index cc22e8ccab5..c9a271062ab 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512DQ+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512DQ+X64.xml
@@ -15,7 +15,7 @@
- To be added.
+ Provides access to the x86 AVX512DQ hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -36,9 +36,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512DQ.xml b/xml/System.Runtime.Intrinsics.X86/Avx512DQ.xml
index 3f385ca9575..fc6c4552100 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512DQ.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512DQ.xml
@@ -491,7 +491,10 @@
To be added.
To be added.
- To be added.
+
+ __m256 _mm512_cvt_roundepi64_ps (__m512i a, int r)
+ VCVTQQ2PS ymm1, zmm2 {er}
+
To be added.
To be added.
@@ -525,7 +528,10 @@
To be added.
To be added.
- To be added.
+
+ __m256 _mm512_cvt_roundepu64_ps (__m512i a, int r)
+ VCVTUQQ2PS ymm1, zmm2 {er}
+
To be added.
To be added.
@@ -617,7 +623,10 @@
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_cvt_roundepi64_pd (__m512i a, int r)
+ VCVTQQ2PD zmm1, zmm2 {er}
+
To be added.
To be added.
@@ -651,7 +660,10 @@
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_cvt_roundepu64_pd (__m512i a, int r)
+ VCVTUQQ2PD zmm1, zmm2 {er}
+
To be added.
To be added.
@@ -743,7 +755,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_cvt_roundps_epi64 (__m512 a, int r)
+ VCVTPS2QQ zmm1, ymm2 {er}
+
To be added.
To be added.
@@ -777,7 +792,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_cvt_roundpd_epi64 (__m512d a, int r)
+ VCVTPD2QQ zmm1, zmm2 {er}
+
To be added.
To be added.
@@ -927,7 +945,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_cvt_roundps_epu64 (__m512 a, int r)
+ VCVTPS2UQQ zmm1 {k1}{z}, ymm2/m256/m32bcst{er}
+
To be added.
To be added.
@@ -961,7 +982,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_cvt_roundpd_epu64 (__m512d a, int r)
+ VCVTPD2UQQ zmm1 {k1}{z}, zmm2/m512/m64bcst{er}
+
To be added.
To be added.
@@ -1509,9 +1533,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512F+VL.xml b/xml/System.Runtime.Intrinsics.X86/Avx512F+VL.xml
index c770be7b21d..24476131098 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512F+VL.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512F+VL.xml
@@ -15,7 +15,7 @@
- To be added.
+ Provides access to the x86 AVX512F+VL hardware instructions via intrinsics.
To be added.
@@ -4050,9 +4050,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512F+X64.xml b/xml/System.Runtime.Intrinsics.X86/Avx512F+X64.xml
index c7e8411331e..2349adf8c7f 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512F+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512F+X64.xml
@@ -15,7 +15,7 @@
- To be added.
+ Provides access to the x86 AVX512F hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -81,7 +81,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_cvt_roundsi64_sd (__m128d a, __int64 b, int rounding)
+ VCVTSI2SD xmm1, xmm2, r64 {er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -117,7 +121,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_cvt_roundu64_sd (__m128d a, unsigned __int64 b, int rounding)
+ VCVTUSI2SD xmm1, xmm2, r64 {er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -184,7 +192,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_cvt_roundi64_ss (__m128 a, __int64 b, int rounding)
+ VCVTSI2SS xmm1, xmm2, r64 {er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -220,7 +232,11 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_cvt_roundu64_ss (__m128 a, unsigned __int64 b, int rounding)
+ VCVTUSI2SS xmm1, xmm2, r64 {er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -254,7 +270,11 @@
To be added.
To be added.
- To be added.
+
+ __int64 _mm_cvt_roundsd_i64 (__m128d a, int rounding)
+ VCVTSD2SI r64, xmm1 {er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -288,7 +308,11 @@
To be added.
To be added.
- To be added.
+
+ __int64 _mm_cvt_roundss_i64 (__m128 a, int rounding)
+ VCVTSS2SI r64, xmm1 {er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -380,7 +404,11 @@
To be added.
To be added.
- To be added.
+
+ unsigned __int64 _mm_cvt_roundsd_u64 (__m128d a, int rounding)
+ VCVTSD2USI r64, xmm1 {er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -414,7 +442,11 @@
To be added.
To be added.
- To be added.
+
+ unsigned __int64 _mm_cvt_roundss_u64 (__m128 a, int rounding)
+ VCVTSS2USI r64, xmm1 {er}
+ This intrinsic is only available on 64-bit processes
+
To be added.
To be added.
@@ -494,9 +526,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512F.xml b/xml/System.Runtime.Intrinsics.X86/Avx512F.xml
index 345e74ab6d9..2e85239ebf9 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512F.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512F.xml
@@ -300,7 +300,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_add_round_pd (__m512d a, __m512d b, int rounding)
+ VADDPD zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -336,7 +339,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_add_round_ps (__m512 a, __m512 b, int rounding)
+ VADDPS zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -372,7 +378,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_add_round_sd (__m128d a, __m128d b, int rounding)
+ VADDSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -408,7 +417,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_add_round_ss (__m128 a, __m128 b, int rounding)
+ VADDSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -3270,7 +3282,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_cvt_roundi32_ss (__m128 a, int b, int rounding)
+ VCVTSI2SS xmm1, xmm2, r32 {er}
+
To be added.
To be added.
@@ -3306,7 +3321,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_cvt_roundsd_ss (__m128 a, __m128d b, int rounding)
+ VCVTSD2SS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -3342,7 +3360,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_cvt_roundu32_ss (__m128 a, unsigned int b, int rounding)
+ VCVTUSI2SS xmm1, xmm2, r32 {er}
+
To be added.
To be added.
@@ -3376,7 +3397,10 @@
To be added.
To be added.
- To be added.
+
+ int _mm_cvt_roundsd_i32 (__m128d a, int rounding)
+ VCVTSD2SI r32, xmm1 {er}
+
To be added.
To be added.
@@ -3410,7 +3434,10 @@
To be added.
To be added.
- To be added.
+
+ int _mm_cvt_roundss_i32 (__m128 a, int rounding)
+ VCVTSS2SI r32, xmm1 {er}
+
To be added.
To be added.
@@ -3502,7 +3529,10 @@
To be added.
To be added.
- To be added.
+
+ unsigned int _mm_cvt_roundsd_u32 (__m128d a, int rounding)
+ VCVTSD2USI r32, xmm1 {er}
+
To be added.
To be added.
@@ -3536,7 +3566,10 @@
To be added.
To be added.
- To be added.
+
+ unsigned int _mm_cvt_roundss_u32 (__m128 a, int rounding)
+ VCVTSS2USI r32, xmm1 {er}
+
To be added.
To be added.
@@ -4324,7 +4357,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm512_cvt_roundpd_epi32 (__m512d a, int rounding)
+ VCVTPD2DQ ymm1, zmm2 {er}
+
To be added.
To be added.
@@ -4445,7 +4481,10 @@
To be added.
To be added.
- To be added.
+
+ __m256 _mm512_cvt_roundpd_ps (__m512d a, int rounding)
+ VCVTPD2PS ymm1, zmm2 {er}
+
To be added.
To be added.
@@ -4653,7 +4692,7 @@
To be added.
To be added.
- To be added.
+ <para>__m256i _mm512_cvt_roundpd_epu32 (__m512d a, int rounding)</para> <para>VCVTPD2UDQ ymm1, zmm2 {er}</para>
To be added.
To be added.
@@ -4977,7 +5016,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_cvt_roundps_epi32 (__m512 a, int rounding)
+ VCVTPS2DQ zmm1, zmm2 {er}
+
To be added.
To be added.
@@ -5272,7 +5314,10 @@
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_cvt_roundepi32_ps (__m512i a, int rounding)
+ VCVTDQ2PS zmm1, zmm2 {er}
+
To be added.
To be added.
@@ -5306,7 +5351,10 @@
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_cvt_roundepu32_ps (__m512i a, int rounding)
+ VCVTUDQ2PS zmm1, zmm2 {er}
+
To be added.
To be added.
@@ -5485,7 +5533,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_cvt_roundps_epu32 (__m512 a, int rounding)
+ VCVTPS2UDQ zmm1, zmm2 {er}
+
To be added.
To be added.
@@ -5786,7 +5837,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_div_round_pd (__m512d a, __m512d b, int rounding)
+ VDIVPD zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -5822,7 +5876,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_div_round_ps (__m512 a, __m512 b, int rounding)
+ VDIVPS zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -5858,7 +5915,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_div_round_sd (__m128d a, __m128d b, int rounding)
+ VDIVSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -5894,7 +5954,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_div_round_ss (__m128 a, __m128 b, int rounding)
+ VDIVSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -7009,7 +7072,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_fmadd_round_pd (__m512d a, __m512d b, __m512d c, int r)
+ VFMADDPD zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -7047,7 +7113,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_fmadd_round_ps (__m512 a, __m512 b, __m512 c, int r)
+ VFMADDPS zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -7151,7 +7220,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_fnmadd_round_pd (__m512d a, __m512d b, __m512d c, int r)
+ VFNMADDPD zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -7189,7 +7261,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_fnmadd_round_ps (__m512 a, __m512 b, __m512 c, int r)
+ VFNMADDPS zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -7227,7 +7302,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_fnmadd_round_sd (__m128d a, __m128d b, __m128d c, int r)
+ VFNMADDSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -7265,7 +7343,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_fnmadd_round_ss (__m128 a, __m128 b, __m128 c, int r)
+ VFNMADDSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -7303,7 +7384,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_fmadd_round_sd (__m128d a, __m128d b, __m128d c, int r)
+ VFMADDSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -7341,7 +7425,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_fmadd_round_ss (__m128 a, __m128 b, __m128 c, int r)
+ VFMADDSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -7445,7 +7532,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_fmaddsub_round_pd (__m512d a, __m512d b, __m512d c, int r)
+ VFMADDSUBPD zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -7483,7 +7573,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_fmaddsub_round_ps (__m512 a, __m512 b, __m512 c, int r)
+ VFMADDSUBPS zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -7587,7 +7680,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_fmsub_round_pd (__m512d a, __m512d b, __m512d c, int r)
+ VFMSUBPD zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -7625,7 +7721,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_fmsub_round_ps (__m512 a, __m512 b, __m512 c, int r)
+ VFMSUBPS zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -7729,7 +7828,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_fmsubadd_round_pd (__m512d a, __m512d b, __m512d c, int r)
+ VFMSUBADDPD zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -7767,7 +7869,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_fmsubadd_round_ps (__m512 a, __m512 b, __m512 c, int r)
+ VFMSUBADDPS zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -7871,7 +7976,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_fnmsub_round_pd (__m512d a, __m512d b, __m512d c, int r)
+ VFNMSUBPD zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -7909,7 +8017,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_fnmsub_round_ps (__m512 a, __m512 b, __m512 c, int r)
+ VFNMSUBPS zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -7947,7 +8058,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_fnmsub_round_sd (__m128d a, __m128d b, __m128d c, int r)
+ VFNMSUBSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -7985,7 +8099,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_fnmsub_round_ss (__m128 a, __m128 b, __m128 c, int r)
+ VFNMSUBSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -8023,7 +8140,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_fmsub_round_sd (__m128d a, __m128d b, __m128d c, int r)
+ VFMSUBSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -8061,7 +8181,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_fmsub_round_ss (__m128 a, __m128 b, __m128 c, int r)
+ VFMSUBSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -9293,9 +9416,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
@@ -10609,7 +10733,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_mul_round_pd (__m512d a, __m512d b, int rounding)
+ VMULPD zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -10645,7 +10772,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_mul_round_ps (__m512 a, __m512 b, int rounding)
+ VMULPS zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -10743,7 +10873,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_mul_round_sd (__m128d a, __m128d b, int rounding)
+ VMULSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -10779,7 +10912,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_mul_round_ss (__m128 a, __m128 b, int rounding)
+ VMULSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -12901,7 +13037,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_scalef_round_pd (__m512d a, __m512d b, int rounding)
+ VSCALEFPD zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -12937,7 +13076,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_scalef_round_ps (__m512 a, __m512 b, int rounding)
+ VSCALEFPS zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -13035,7 +13177,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_scalef_round_sd (__m128d a, __m128d b, int rounding)
+ VSCALEFSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -13071,7 +13216,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_scalef_round_ss (__m128 a, __m128 b, int rounding)
+ VSCALEFSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -14559,7 +14707,10 @@
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_sqrt_round_pd (__m512d a, int rounding)
+ VSQRTPD zmm1, zmm2 {er}
+
To be added.
To be added.
@@ -14593,7 +14744,10 @@
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_sqrt_round_ps (__m512 a, int rounding)
+ VSQRTPS zmm1, zmm2 {er}
+
To be added.
To be added.
@@ -14629,7 +14783,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_sqrt_round_sd (__m128d a, __m128d b, int rounding)
+ VSQRTSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -14665,7 +14822,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_sqrt_round_ss (__m128 a, __m128 b, int rounding)
+ VSQRTSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -15757,7 +15917,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512d _mm512_sub_round_pd (__m512d a, __m512d b, int rounding)
+ VSUBPD zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -15793,7 +15956,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m512 _mm512_sub_round_ps (__m512 a, __m512 b, int rounding)
+ VSUBPS zmm1, zmm2, zmm3 {er}
+
To be added.
To be added.
@@ -15829,7 +15995,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128d _mm_sub_round_sd (__m128d a, __m128d b, int rounding)
+ VSUBSD xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
@@ -15865,7 +16034,10 @@
To be added.
To be added.
To be added.
- To be added.
+
+ __m128 _mm_sub_round_ss (__m128 a, __m128 b, int rounding)
+ VSUBSS xmm1, xmm2, xmm3 {er}
+
To be added.
To be added.
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi+VL.xml b/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi+VL.xml
index 01d4af6ff29..e191bccd9f5 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi+VL.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi+VL.xml
@@ -15,7 +15,7 @@
- To be added.
+ Provides access to the x86 AVX512VBMI+VL hardware instructions via intrinsics.
To be added.
@@ -36,9 +36,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
@@ -63,7 +64,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_multishift_epi64_epi8(__m128i a, __m128i b)
+ VPMULTISHIFTQB xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -90,7 +94,10 @@
To be added.
To be added.
- To be added.
+
+ __m128i _mm_multishift_epi64_epi8(__m128i a, __m128i b)
+ VPMULTISHIFTQB xmm1 {k1}{z}, xmm2, xmm3/m128/m64bcst
+
To be added.
To be added.
@@ -117,7 +124,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_multishift_epi64_epi8(__m256i a, __m256i b)
+ VPMULTISHIFTQB ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
@@ -144,7 +154,10 @@
To be added.
To be added.
- To be added.
+
+ __m256i _mm256_multishift_epi64_epi8(__m256i a, __m256i b)
+ VPMULTISHIFTQB ymm1 {k1}{z}, ymm2, ymm3/m256/m64bcst
+
To be added.
To be added.
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi+X64.xml b/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi+X64.xml
index 97e2bb366c6..0e23400d27d 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi+X64.xml
@@ -15,7 +15,7 @@
- To be added.
+ Provides access to the x86 AVX512VBMI hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -36,9 +36,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi.xml b/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi.xml
index e1f3bf0eebf..6be458e6964 100644
--- a/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Avx512Vbmi.xml
@@ -42,9 +42,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
@@ -69,7 +70,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_multishift_epi64_epi8(__m512i a, __m512i b)
+ VPMULTISHIFTQB zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
@@ -96,7 +100,10 @@
To be added.
To be added.
- To be added.
+
+ __m512i _mm512_multishift_epi64_epi8(__m512i a, __m512i b)
+ VPMULTISHIFTQB zmm1 {k1}{z}, zmm2, zmm3/m512/m64bcst
+
To be added.
To be added.
diff --git a/xml/System.Runtime.Intrinsics.X86/AvxVnni+X64.xml b/xml/System.Runtime.Intrinsics.X86/AvxVnni+X64.xml
index 419dd9dfcee..558d5f5f9dd 100644
--- a/xml/System.Runtime.Intrinsics.X86/AvxVnni+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/AvxVnni+X64.xml
@@ -17,7 +17,7 @@
- To be added.
+ Provides access to the x86 AVXVNNI hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -40,9 +40,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/AvxVnni.xml b/xml/System.Runtime.Intrinsics.X86/AvxVnni.xml
index 28c567cfdfa..bcaf7cc8a7f 100644
--- a/xml/System.Runtime.Intrinsics.X86/AvxVnni.xml
+++ b/xml/System.Runtime.Intrinsics.X86/AvxVnni.xml
@@ -27,7 +27,7 @@
- To be added.
+ Provides access to the x86 AVXVNNI hardware instructions via intrinsics.
To be added.
@@ -50,9 +50,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Bmi1+X64.xml b/xml/System.Runtime.Intrinsics.X86/Bmi1+X64.xml
index a0e9d901339..9ae07a7d5ad 100644
--- a/xml/System.Runtime.Intrinsics.X86/Bmi1+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Bmi1+X64.xml
@@ -26,7 +26,7 @@
- To be added.
+ Provides access to the x86 BMI1 hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -270,9 +270,10 @@ This intrinsic is only available on 64-bit processes.
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Bmi1.xml b/xml/System.Runtime.Intrinsics.X86/Bmi1.xml
index b47c0d2df94..2dfa3ea0556 100644
--- a/xml/System.Runtime.Intrinsics.X86/Bmi1.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Bmi1.xml
@@ -236,9 +236,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Bmi2+X64.xml b/xml/System.Runtime.Intrinsics.X86/Bmi2+X64.xml
index 09fb1fb36fb..52bd1778014 100644
--- a/xml/System.Runtime.Intrinsics.X86/Bmi2+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Bmi2+X64.xml
@@ -26,7 +26,7 @@
- To be added.
+ Provides access to the x86 BMI2 hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -52,9 +52,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Bmi2.xml b/xml/System.Runtime.Intrinsics.X86/Bmi2.xml
index 1bcf3a140b2..48920dfb2c9 100644
--- a/xml/System.Runtime.Intrinsics.X86/Bmi2.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Bmi2.xml
@@ -58,9 +58,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/FloatRoundingMode.xml b/xml/System.Runtime.Intrinsics.X86/FloatRoundingMode.xml
index 87507bb09b9..2fe99db78db 100644
--- a/xml/System.Runtime.Intrinsics.X86/FloatRoundingMode.xml
+++ b/xml/System.Runtime.Intrinsics.X86/FloatRoundingMode.xml
@@ -34,7 +34,9 @@
8
- To be added.
+
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC
+
@@ -54,7 +56,9 @@
9
- To be added.
+
+ _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC
+
@@ -74,7 +78,9 @@
10
- To be added.
+
+ _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC
+
@@ -94,7 +100,9 @@
11
- To be added.
+
+ _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC
+
diff --git a/xml/System.Runtime.Intrinsics.X86/Fma+X64.xml b/xml/System.Runtime.Intrinsics.X86/Fma+X64.xml
index 530c0c2fb0b..df6eb7df714 100644
--- a/xml/System.Runtime.Intrinsics.X86/Fma+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Fma+X64.xml
@@ -18,7 +18,7 @@
- To be added.
+ Provides access to the x86 FMA hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -42,9 +42,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Fma.xml b/xml/System.Runtime.Intrinsics.X86/Fma.xml
index 4868136b1b8..662e371340a 100644
--- a/xml/System.Runtime.Intrinsics.X86/Fma.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Fma.xml
@@ -52,9 +52,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Lzcnt+X64.xml b/xml/System.Runtime.Intrinsics.X86/Lzcnt+X64.xml
index 2300e2e2004..d6529cdb58c 100644
--- a/xml/System.Runtime.Intrinsics.X86/Lzcnt+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Lzcnt+X64.xml
@@ -26,7 +26,7 @@
- To be added.
+ Provides access to the x86 LZCNT hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -52,9 +52,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Lzcnt.xml b/xml/System.Runtime.Intrinsics.X86/Lzcnt.xml
index a96ad9efb60..7ac87a2afc4 100644
--- a/xml/System.Runtime.Intrinsics.X86/Lzcnt.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Lzcnt.xml
@@ -58,9 +58,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Pclmulqdq+X64.xml b/xml/System.Runtime.Intrinsics.X86/Pclmulqdq+X64.xml
index 3f5c983b448..f981075e454 100644
--- a/xml/System.Runtime.Intrinsics.X86/Pclmulqdq+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Pclmulqdq+X64.xml
@@ -18,7 +18,7 @@
- To be added.
+ Provides access to the x86 CLMUL hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -42,9 +42,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Pclmulqdq.xml b/xml/System.Runtime.Intrinsics.X86/Pclmulqdq.xml
index 03ac28b3fe2..18d998d0724 100644
--- a/xml/System.Runtime.Intrinsics.X86/Pclmulqdq.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Pclmulqdq.xml
@@ -142,9 +142,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Popcnt+X64.xml b/xml/System.Runtime.Intrinsics.X86/Popcnt+X64.xml
index b45b04ae4f9..a366f1789c3 100644
--- a/xml/System.Runtime.Intrinsics.X86/Popcnt+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Popcnt+X64.xml
@@ -26,7 +26,7 @@
- To be added.
+ Provides access to the x86 POPCNT hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -52,9 +52,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Popcnt.xml b/xml/System.Runtime.Intrinsics.X86/Popcnt.xml
index d6417f45455..4068d75085d 100644
--- a/xml/System.Runtime.Intrinsics.X86/Popcnt.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Popcnt.xml
@@ -52,9 +52,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Sse+X64.xml b/xml/System.Runtime.Intrinsics.X86/Sse+X64.xml
index 3bc41120f9f..6578b1fd0c1 100644
--- a/xml/System.Runtime.Intrinsics.X86/Sse+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Sse+X64.xml
@@ -26,7 +26,7 @@
- To be added.
+ Provides access to the x86 SSE hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -180,9 +180,10 @@ This intrinsic is only available on 64-bit processes.
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Sse.xml b/xml/System.Runtime.Intrinsics.X86/Sse.xml
index 132158d84d4..14b4548f509 100644
--- a/xml/System.Runtime.Intrinsics.X86/Sse.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Sse.xml
@@ -1674,9 +1674,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Sse2+X64.xml b/xml/System.Runtime.Intrinsics.X86/Sse2+X64.xml
index 13d9982af56..5ef7fe1e5a7 100644
--- a/xml/System.Runtime.Intrinsics.X86/Sse2+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Sse2+X64.xml
@@ -20,7 +20,7 @@
- To be added.
+ Provides access to the x86 SSE2 hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -342,9 +342,10 @@ This intrinsic is only available on 64-bit processes.
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Sse2.xml b/xml/System.Runtime.Intrinsics.X86/Sse2.xml
index 2714af858a6..1d0c17d083f 100644
--- a/xml/System.Runtime.Intrinsics.X86/Sse2.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Sse2.xml
@@ -3793,9 +3793,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Sse3+X64.xml b/xml/System.Runtime.Intrinsics.X86/Sse3+X64.xml
index 2f27839312b..6bf589a54c3 100644
--- a/xml/System.Runtime.Intrinsics.X86/Sse3+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Sse3+X64.xml
@@ -18,7 +18,7 @@
- To be added.
+ Provides access to the x86 SSE3 hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -42,9 +42,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Sse3.xml b/xml/System.Runtime.Intrinsics.X86/Sse3.xml
index b1a42ae4cc6..d4509c5cbcc 100644
--- a/xml/System.Runtime.Intrinsics.X86/Sse3.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Sse3.xml
@@ -268,9 +268,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Sse41+X64.xml b/xml/System.Runtime.Intrinsics.X86/Sse41+X64.xml
index 176618b45b1..5adcb9e74f7 100644
--- a/xml/System.Runtime.Intrinsics.X86/Sse41+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Sse41+X64.xml
@@ -26,7 +26,7 @@
- To be added.
+ Provides access to the x86 SSE4.1 hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -260,9 +260,10 @@ This intrinsic is only available on 64-bit processes.
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Sse41.xml b/xml/System.Runtime.Intrinsics.X86/Sse41.xml
index a5d6bec260b..db96af19918 100644
--- a/xml/System.Runtime.Intrinsics.X86/Sse41.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Sse41.xml
@@ -2531,9 +2531,10 @@ The native signature doesn't exist. This additional overload is provided for com
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Sse42+X64.xml b/xml/System.Runtime.Intrinsics.X86/Sse42+X64.xml
index b3eda8c684d..61f0e27f11d 100644
--- a/xml/System.Runtime.Intrinsics.X86/Sse42+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Sse42+X64.xml
@@ -20,7 +20,7 @@
- To be added.
+ Provides access to the x86 SSE4.2 hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -90,9 +90,10 @@ This intrinsic is only available on 64-bit processes.
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Sse42.xml b/xml/System.Runtime.Intrinsics.X86/Sse42.xml
index c6017e68d88..eaf84a8160c 100644
--- a/xml/System.Runtime.Intrinsics.X86/Sse42.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Sse42.xml
@@ -196,9 +196,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Ssse3+X64.xml b/xml/System.Runtime.Intrinsics.X86/Ssse3+X64.xml
index 4669d2902c3..4b6d458fd8d 100644
--- a/xml/System.Runtime.Intrinsics.X86/Ssse3+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Ssse3+X64.xml
@@ -18,7 +18,7 @@
- To be added.
+ Provides access to the x86 SSSE3 hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -42,9 +42,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/Ssse3.xml b/xml/System.Runtime.Intrinsics.X86/Ssse3.xml
index 0a5c7f28078..aebfa2584c0 100644
--- a/xml/System.Runtime.Intrinsics.X86/Ssse3.xml
+++ b/xml/System.Runtime.Intrinsics.X86/Ssse3.xml
@@ -786,9 +786,10 @@ This intrinsic generates PALIGNR that operates over bytes rather than elements o
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/X86Base+X64.xml b/xml/System.Runtime.Intrinsics.X86/X86Base+X64.xml
index 0653970ec56..c29e8b2fe97 100644
--- a/xml/System.Runtime.Intrinsics.X86/X86Base+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/X86Base+X64.xml
@@ -18,7 +18,7 @@
- To be added.
+ Provides access to the x86 base hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -130,9 +130,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/X86Base.xml b/xml/System.Runtime.Intrinsics.X86/X86Base.xml
index ff10de30fce..f556a83e77e 100644
--- a/xml/System.Runtime.Intrinsics.X86/X86Base.xml
+++ b/xml/System.Runtime.Intrinsics.X86/X86Base.xml
@@ -253,9 +253,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/X86Serialize+X64.xml b/xml/System.Runtime.Intrinsics.X86/X86Serialize+X64.xml
index fef9c668ca4..772b3ebb556 100644
--- a/xml/System.Runtime.Intrinsics.X86/X86Serialize+X64.xml
+++ b/xml/System.Runtime.Intrinsics.X86/X86Serialize+X64.xml
@@ -16,7 +16,7 @@
- To be added.
+ Provides access to the x86 SERIALIZE hardware instructions, that are only available to 64-bit processes, via intrinsics.
To be added.
@@ -38,9 +38,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics.X86/X86Serialize.xml b/xml/System.Runtime.Intrinsics.X86/X86Serialize.xml
index 63ac35e0746..d77c8274d0e 100644
--- a/xml/System.Runtime.Intrinsics.X86/X86Serialize.xml
+++ b/xml/System.Runtime.Intrinsics.X86/X86Serialize.xml
@@ -44,9 +44,10 @@
System.Boolean
- To be added.
- To be added.
- To be added.
+ Gets a value that indicates whether the APIs in this class are supported.
+
+ if the APIs are supported; otherwise, .
+ A value of indicates that the APIs will throw .
diff --git a/xml/System.Runtime.Intrinsics/Vector128.xml b/xml/System.Runtime.Intrinsics/Vector128.xml
index b87d8765570..ef39f4145b2 100644
--- a/xml/System.Runtime.Intrinsics/Vector128.xml
+++ b/xml/System.Runtime.Intrinsics/Vector128.xml
@@ -1281,9 +1281,10 @@
- To be added.
- To be added.
- To be added.
+ The vector to reinterpret.
+ Reinterprets a as a new , leaving the new elements undefined.
+
+ reinterpreted as a new .
To be added.
@@ -1306,9 +1307,10 @@
- To be added.
- To be added.
- To be added.
+ The vector to reinterpret.
+ Reinterprets a as a new , leaving the new elements undefined.
+
+ reinterpreted as a new .
To be added.
@@ -1557,6 +1559,7 @@
Computes the ceiling of each element in a vector.
A vector whose elements are the ceiling of the elements in .
To be added.
+
@@ -1584,6 +1587,7 @@
Computes the ceiling of each element in a vector.
A vector whose elements are the ceiling of the elements in .
To be added.
+
@@ -1644,12 +1648,13 @@
To be added.
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to restrict.
+ The minimum value.
+ The maximum value.
+ Restricts a vector between a minimum and a maximum value.
+ The restricted vector.
To be added.
+ The type of the elements in the vector () is not supported.
@@ -1710,12 +1715,13 @@
To be added.
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to restrict.
+ The minimum value.
+ The maximum value.
+ Restricts a vector between a minimum and a maximum value using platform specific behavior for NaN and NegativeZero.
+ The restricted vector.
To be added.
+ The type of the elements in the vector () is not supported.
@@ -1898,9 +1904,9 @@
- To be added.
- To be added.
- To be added.
+ The vector to convert.
+ Converts a to a using platform specific behavior on overflow.
+ The converted vector.
To be added.
@@ -1950,9 +1956,9 @@
- To be added.
- To be added.
- To be added.
+ The vector to convert.
+ Converts a to a using platform specific behavior on overflow.
+ The converted vector.
To be added.
@@ -2074,9 +2080,9 @@
- To be added.
- To be added.
- To be added.
+ The vector to convert.
+ Converts a to a using platform specific behavior on overflow.
+ The converted vector.
To be added.
@@ -2138,9 +2144,9 @@
- To be added.
- To be added.
- To be added.
+ The vector to convert.
+ Converts a to a using platform specific behavior on overflow.
+ The converted vector.
To be added.
@@ -2194,10 +2200,10 @@
To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector whose magnitude is used in the result.
+ The vector whose sign is used in the result.
+ Copies the per-element sign of a vector to the per-element sign of another vector.
+ A vector with the magnitude of and the sign of .
To be added.
@@ -2425,6 +2431,7 @@
To be added.
To be added.
To be added.
+
@@ -2450,6 +2457,7 @@
To be added.
To be added.
To be added.
+
@@ -3982,11 +3990,12 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
- To be added.
- To be added.
- To be added.
- To be added.
+ The type of the elements in the vector.
+ The value that the lower and upper 64-bits will be initialized to.
+ Creates a new instance with the lower and upper 64-bits initialized to a specified value.
+ A new with the lower and upper 64-bits initialized to .
To be added.
+ The type of () is not supported.
@@ -5151,11 +5160,11 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The type of the elements in the vector.
+ The value that element 0 will be initialized to.
+ The value that indicates how far apart each element should be from the previous.
+ Creates a new instance where the elements begin at a specified value and which are spaced apart according to another specified value.
+ A new instance with the first element initialized to and each subsequent element initialized to the value of the previous element plus .
To be added.
@@ -5182,6 +5191,7 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
To be added.
To be added.
To be added.
+
@@ -5207,6 +5217,7 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
To be added.
To be added.
To be added.
+
@@ -5623,6 +5634,7 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
To be added.
To be added.
To be added.
+
@@ -5648,6 +5660,7 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
To be added.
To be added.
To be added.
+
@@ -5735,6 +5748,7 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
Computes the floor of each element in a vector.
A vector whose elements are the floor of the elements in .
To be added.
+
@@ -5762,6 +5776,7 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
Computes the floor of each element in a vector.
A vector whose elements are the floor of the elements in .
To be added.
+
@@ -5791,6 +5806,7 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
To be added.
To be added.
To be added.
+
@@ -5820,6 +5836,7 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
To be added.
To be added.
To be added.
+
@@ -6431,6 +6448,7 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
To be added.
To be added.
To be added.
+
@@ -6458,6 +6476,7 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
To be added.
To be added.
To be added.
+
@@ -6534,9 +6553,9 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
To be added.
- To be added.
- To be added.
- To be added.
+ The vector to be checked.
+ Determines which elements in a vector are NaN.
+ A vector whose elements are all-bits-set or zero, depending on if the corresponding elements in were NaN.
To be added.
@@ -6582,9 +6601,9 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
To be added.
- To be added.
- To be added.
- To be added.
+ The vector to be checked.
+ Determines which elements in a vector represent negative real numbers.
+ A vector whose elements are all-bits-set or zero, depending on if the corresponding elements in were negative.
To be added.
@@ -6630,9 +6649,9 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
To be added.
- To be added.
- To be added.
- To be added.
+ The vector to be checked.
+ Determines which elements in a vector represent positive real numbers.
+ A vector whose elements are all-bits-set or zero, depending on if the corresponding elements in were positive.
To be added.
@@ -6678,9 +6697,9 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
To be added.
- To be added.
- To be added.
- To be added.
+ The vector to be checked.
+ Determines which elements in a vector are positive infinity.
+ A vector whose elements are all-bits-set or zero, depending on if the corresponding elements in were positive infinity.
To be added.
@@ -6726,9 +6745,9 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
To be added.
- To be added.
- To be added.
- To be added.
+ The vector to be checked.
+ Determines which elements in a vector are zero.
+ A vector whose elements are all-bits-set or zero, depending on if the corresponding elements in were zero.
To be added.
@@ -6759,6 +6778,7 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
To be added.
To be added.
To be added.
+
@@ -6788,6 +6808,7 @@ On x86, this method corresponds to __m128i _mm_setr_epi8
To be added.
To be added.
To be added.
+
@@ -7519,6 +7540,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -7544,6 +7566,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -7569,6 +7592,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -7594,6 +7618,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -7657,7 +7682,7 @@ This method may bypass the cache on certain platforms.
The vector to compare with .
Computes the maximum of two vectors on a per-element basis.
A vector whose elements are the maximum of the corresponding elements in and .
- To be added.
+ For this method matches the IEEE 754:2019 maximum function. This requires NaN inputs to be propagated back to the caller and for -0.0 to be treated as less than +0.0.
The type of and () is not supported.
@@ -7711,11 +7736,12 @@ This method may bypass the cache on certain platforms.
To be added.
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to compare with .
+ The vector to compare with .
+ Compares two vectors to compute which has the greater magnitude on a per-element basis.
+ A vector where the corresponding element comes from if it has a greater magnitude than ; otherwise, .
+ For this method matches the IEEE 754:2019 maximumMagnitude function. This requires NaN inputs to be propagated back to the caller and for -0.0 to be treated as less than +0.0.
+ The type of the elements in the vector () is not supported.
@@ -7768,11 +7794,12 @@ This method may bypass the cache on certain platforms.
To be added.
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to compare with .
+ The vector to compare with .
+ Compares two vectors, on a per-element basis, to compute which has the greater magnitude and returning the other value if an input is NaN.
+ A vector where the corresponding element comes from if it has a greater magnitude than ; otherwise, .
+ For this method matches the IEEE 754:2019 maximumMagnitudeNumber function. This requires NaN inputs to not be propagated back to the caller and for -0.0 to be treated as less than +0.0.
+ The type of the elements in the vector () is not supported.
@@ -7825,11 +7852,12 @@ This method may bypass the cache on certain platforms.
To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to compare with .
+ The vector to compare with .
+ Compare two vectors to determine which is greater on a per-element basis using platform specific behavior for NaN and NegativeZero.
+ A vector where the corresponding element comes from if it is greater than ; otherwise, .
To be added.
+ The type of the elements in the vector () is not supported.
@@ -7882,11 +7910,12 @@ This method may bypass the cache on certain platforms.
To be added.
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to compare with .
+ The vector to compare with .
+ Compares two vectors, on a per-element basis, to compute which is greater and returning the other value if an element is NaN.
+ A vector where the corresponding element comes from if it is greater than ; otherwise, .
+ For this method matches the IEEE 754:2019 maximumNumber function. This requires NaN inputs to not be propagated back to the caller and for -0.0 to be treated as less than +0.0.
+ The type of the elements in the vector () is not supported.
@@ -7950,7 +7979,7 @@ This method may bypass the cache on certain platforms.
The vector to compare with .
Computes the minimum of two vectors on a per-element basis.
A vector whose elements are the minimum of the corresponding elements in and .
- To be added.
+ For this method matches the IEEE 754:2019 minimum function. This requires NaN inputs to be propagated back to the caller and for -0.0 to be treated as less than +0.0.
The type of and () is not supported.
@@ -8004,11 +8033,12 @@ This method may bypass the cache on certain platforms.
To be added.
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to compare with .
+ The vector to compare with .
+ Compares two vectors to compute which has the lesser magnitude on a per-element basis.
+ A vector where the corresponding element comes from if it has a lesser magnitude than ; otherwise, .
+ For this method matches the IEEE 754:2019 minimumMagnitude function. This requires NaN inputs to be propagated back to the caller and for -0.0 to be treated as less than +0.0.
+ The type of the elements in the vector () is not supported.
@@ -8061,11 +8091,12 @@ This method may bypass the cache on certain platforms.
To be added.
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to compare with .
+ The vector to compare with .
+ Compares two vectors, on a per-element basis, to compute which has the lesser magnitude and returning the other value if an input is NaN.
+ A vector where the corresponding element comes from if it has a lesser magnitude than ; otherwise, .
+ For this method matches the IEEE 754:2019 minimumMagnitudeNumber function. This requires NaN inputs to not be propagated back to the caller and for -0.0 to be treated as less than +0.0.
+ The type of the elements in the vector () is not supported.
@@ -8118,11 +8149,12 @@ This method may bypass the cache on certain platforms.
To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to compare with .
+ The vector to compare with .
+ Compare two vectors to determine which is lesser on a per-element basis using platform specific behavior for NaN and NegativeZero.
+ A vector where the corresponding element comes from if it is lesser than ; otherwise, .
To be added.
+ The type of the elements in the vector () is not supported.
@@ -8175,11 +8207,12 @@ This method may bypass the cache on certain platforms.
To be added.
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to compare with .
+ The vector to compare with .
+ Compares two vectors, on a per-element basis, to compute which is lesser and returning the other value if an element is NaN.
+ A vector where the corresponding element comes from if it is lesser than ; otherwise, .
+ For this method matches the IEEE 754:2019 minimumNumber function. This requires NaN inputs to not be propagated back to the caller and for -0.0 to be treated as less than +0.0.
+ The type of the elements in the vector () is not supported.
@@ -8402,6 +8435,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8431,6 +8465,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8795,6 +8830,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8820,6 +8856,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8845,6 +8882,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8870,6 +8908,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8897,6 +8936,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8924,6 +8964,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -10054,6 +10095,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -10079,6 +10121,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -10110,6 +10153,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -10141,6 +10185,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -10873,6 +10918,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -10898,6 +10944,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
diff --git a/xml/System.Runtime.Intrinsics/Vector128`1.xml b/xml/System.Runtime.Intrinsics/Vector128`1.xml
index 1a92644f6ab..0adc4d1ab20 100644
--- a/xml/System.Runtime.Intrinsics/Vector128`1.xml
+++ b/xml/System.Runtime.Intrinsics/Vector128`1.xml
@@ -265,9 +265,10 @@
System.Runtime.Intrinsics.Vector128<T>
- To be added.
+ Gets a new with the elements set to their index.
To be added.
To be added.
+ The type of the vector () is not supported.
diff --git a/xml/System.Runtime.Intrinsics/Vector256.xml b/xml/System.Runtime.Intrinsics/Vector256.xml
index d738bea6778..96592d84ef3 100644
--- a/xml/System.Runtime.Intrinsics/Vector256.xml
+++ b/xml/System.Runtime.Intrinsics/Vector256.xml
@@ -1328,6 +1328,7 @@
Computes the ceiling of each element in a vector.
A vector whose elements are the ceiling of the elements in .
To be added.
+
@@ -1355,6 +1356,7 @@
Computes the ceiling of each element in a vector.
A vector whose elements are the ceiling of the elements in .
To be added.
+
@@ -1421,6 +1423,7 @@
To be added.
To be added.
To be added.
+
@@ -1487,6 +1490,7 @@
To be added.
To be added.
To be added.
+
@@ -1669,9 +1673,9 @@
- To be added.
- To be added.
- To be added.
+ The vector to convert.
+ Converts a to a using platform specific behavior on overflow.
+ The converted vector.
To be added.
@@ -1721,9 +1725,9 @@
- To be added.
- To be added.
- To be added.
+ The vector to convert.
+ Converts a to a using platform specific behavior on overflow.
+ The converted vector.
To be added.
@@ -1845,9 +1849,9 @@
- To be added.
- To be added.
- To be added.
+ The vector to convert.
+ Converts a to a using platform specific behavior on overflow.
+ The converted vector.
To be added.
@@ -1909,9 +1913,9 @@
- To be added.
- To be added.
- To be added.
+ The vector to convert.
+ Converts a to a using platform specific behavior on overflow.
+ The converted vector.
To be added.
@@ -1970,6 +1974,7 @@
To be added.
To be added.
To be added.
+
@@ -2196,6 +2201,7 @@
To be added.
To be added.
To be added.
+
@@ -2221,6 +2227,7 @@
To be added.
To be added.
To be added.
+
@@ -3901,11 +3908,12 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
- To be added.
- To be added.
- To be added.
- To be added.
+ The type of the elements in the vector.
+ The value that the lower and upper 128-bits will be initialized to.
+ Creates a new instance with the lower and upper 128-bits initialized to a specified value.
+ A new with the lower and upper 128-bits initialized to .
To be added.
+ The type of () is not supported.
@@ -3949,11 +3957,12 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
- To be added.
- To be added.
- To be added.
- To be added.
+ The type of the elements in the vector.
+ The value that the 64-bit parts will be initialized to.
+ Creates a new instance with all 64-bit parts initialized to a specified value.
+ A new with the 64-bit parts initialized to .
To be added.
+ The type of () is not supported.
@@ -5118,11 +5127,11 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The type of the elements in the vector.
+ The value that element 0 will be initialized to.
+ The value that indicates how far apart each element should be from the previous.
+ Creates a new instance where the elements begin at a specified value and which are spaced apart according to another specified value.
+ A new instance with the first element initialized to and each subsequent element initialized to the value of the previous element plus .
To be added.
@@ -5149,6 +5158,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
To be added.
To be added.
To be added.
+
@@ -5174,6 +5184,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
To be added.
To be added.
To be added.
+
@@ -5590,6 +5601,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
To be added.
To be added.
To be added.
+
@@ -5615,6 +5627,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
To be added.
To be added.
To be added.
+
@@ -5702,6 +5715,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
Computes the floor of each element in a vector.
A vector whose elements are the floor of the elements in .
To be added.
+
@@ -5729,6 +5743,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
Computes the floor of each element in a vector.
A vector whose elements are the floor of the elements in .
To be added.
+
@@ -5758,6 +5773,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
To be added.
To be added.
To be added.
+
@@ -5787,6 +5803,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
To be added.
To be added.
To be added.
+
@@ -6398,6 +6415,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
To be added.
To be added.
To be added.
+
@@ -6425,6 +6443,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
To be added.
To be added.
To be added.
+
@@ -6505,6 +6524,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
To be added.
To be added.
To be added.
+
@@ -6553,6 +6573,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
To be added.
To be added.
To be added.
+
@@ -6601,6 +6622,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
To be added.
To be added.
To be added.
+
@@ -6649,6 +6671,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
To be added.
To be added.
To be added.
+
@@ -6697,6 +6720,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
To be added.
To be added.
To be added.
+
@@ -6726,6 +6750,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
To be added.
To be added.
To be added.
+
@@ -6755,6 +6780,7 @@ On x86, this method corresponds to __m256i _mm256_setr_epi8
To be added.
To be added.
To be added.
+
@@ -7486,6 +7512,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -7511,6 +7538,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -7536,6 +7564,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -7561,6 +7590,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -7626,6 +7656,7 @@ This method may bypass the cache on certain platforms.
A vector whose elements are the maximum of the corresponding elements in and .
To be added.
The type of and () is not supported.
+
@@ -7683,6 +7714,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -7740,6 +7772,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -7797,6 +7830,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -7854,6 +7888,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -7919,6 +7954,7 @@ This method may bypass the cache on certain platforms.
A vector whose elements are the minimum of the corresponding elements in and .
To be added.
The type of and () is not supported.
+
@@ -7976,6 +8012,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8033,6 +8070,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8090,6 +8128,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8147,6 +8186,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8369,6 +8409,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8398,6 +8439,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8762,6 +8804,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8787,6 +8830,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8812,6 +8856,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8837,6 +8882,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8864,6 +8910,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -8891,6 +8938,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -10021,6 +10069,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -10046,6 +10095,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -10077,6 +10127,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -10108,6 +10159,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -10820,6 +10872,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -10845,6 +10898,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
diff --git a/xml/System.Runtime.Intrinsics/Vector256`1.xml b/xml/System.Runtime.Intrinsics/Vector256`1.xml
index 51d4ba81b96..29c99441aa1 100644
--- a/xml/System.Runtime.Intrinsics/Vector256`1.xml
+++ b/xml/System.Runtime.Intrinsics/Vector256`1.xml
@@ -265,9 +265,10 @@
System.Runtime.Intrinsics.Vector256<T>
- To be added.
+ Gets a new with the elements set to their index.
To be added.
To be added.
+ The type of the vector () is not supported.
diff --git a/xml/System.Runtime.Intrinsics/Vector512.xml b/xml/System.Runtime.Intrinsics/Vector512.xml
index f6cb59dc1a7..89fe97e743c 100644
--- a/xml/System.Runtime.Intrinsics/Vector512.xml
+++ b/xml/System.Runtime.Intrinsics/Vector512.xml
@@ -1066,6 +1066,7 @@
Computes the ceiling of each element in a vector.
A vector whose elements are the ceiling of the elements in .
To be added.
+
@@ -1092,6 +1093,7 @@
Computes the ceiling of each element in a vector.
A vector whose elements are the ceiling of the elements in .
To be added.
+
@@ -1158,6 +1160,7 @@
To be added.
To be added.
To be added.
+
@@ -1224,6 +1227,7 @@
To be added.
To be added.
To be added.
+
@@ -1397,9 +1401,9 @@
- To be added.
- To be added.
- To be added.
+ The vector to convert.
+ Converts a to a using platform specific behavior on overflow.
+ The converted vector.
To be added.
@@ -1448,9 +1452,9 @@
- To be added.
- To be added.
- To be added.
+ The vector to convert.
+ Converts a to a using platform specific behavior on overflow.
+ The converted vector.
To be added.
@@ -1569,9 +1573,9 @@
- To be added.
- To be added.
- To be added.
+ The vector to convert.
+ Converts a to a using platform specific behavior on overflow.
+ The converted vector.
To be added.
@@ -1632,9 +1636,9 @@
- To be added.
- To be added.
- To be added.
+ The vector to convert.
+ Converts a to a using platform specific behavior on overflow.
+ The converted vector.
To be added.
@@ -1693,6 +1697,7 @@
To be added.
To be added.
To be added.
+
@@ -1885,6 +1890,7 @@
To be added.
To be added.
To be added.
+
@@ -1910,6 +1916,7 @@
To be added.
To be added.
To be added.
+
@@ -3504,11 +3511,12 @@
- To be added.
- To be added.
- To be added.
- To be added.
+ The type of the elements in the vector.
+ The value that the 128-bit parts will be initialized to.
+ Creates a new instance with all 128-bit parts initialized to a specified value.
+ A new with the 128-bit parts initialized to .
To be added.
+ The type of () is not supported.
@@ -3552,11 +3560,12 @@
- To be added.
- To be added.
- To be added.
- To be added.
+ The type of the elements in the vector.
+ The value that the lower and upper 256-bits will be initialized to.
+ Creates a new instance with the lower and upper 256-bits initialized to a specified value.
+ A new with the lower and upper 256-bits initialized to .
To be added.
+ The type of () is not supported.
@@ -3600,11 +3609,12 @@
- To be added.
- To be added.
- To be added.
- To be added.
+ The type of the elements in the vector.
+ The value that the 64-bit parts will be initialized to.
+ Creates a new instance with all 64-bit parts initialized to a specified value.
+ A new with the 64-bit parts initialized to .
To be added.
+ The type of () is not supported.
@@ -4650,11 +4660,11 @@
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The type of the elements in the vector.
+ The value that element 0 will be initialized to.
+ The value that indicates how far apart each element should be from the previous.
+ Creates a new instance where the elements begin at a specified value and which are spaced apart according to another specified value.
+ A new instance with the first element initialized to and each subsequent element initialized to the value of the previous element plus .
To be added.
@@ -4681,6 +4691,7 @@
To be added.
To be added.
To be added.
+
@@ -4706,6 +4717,7 @@
To be added.
To be added.
To be added.
+
@@ -5075,6 +5087,7 @@
To be added.
To be added.
To be added.
+
@@ -5100,6 +5113,7 @@
To be added.
To be added.
To be added.
+
@@ -5174,6 +5188,7 @@
Computes the floor of each element in a vector.
A vector whose elements are the floor of the elements in .
To be added.
+
@@ -5200,6 +5215,7 @@
Computes the floor of each element in a vector.
A vector whose elements are the floor of the elements in .
To be added.
+
@@ -5229,6 +5245,7 @@
To be added.
To be added.
To be added.
+
@@ -5258,6 +5275,7 @@
To be added.
To be added.
To be added.
+
@@ -5774,6 +5792,7 @@
To be added.
To be added.
To be added.
+
@@ -5801,6 +5820,7 @@
To be added.
To be added.
To be added.
+
@@ -5872,6 +5892,7 @@
To be added.
To be added.
To be added.
+
@@ -5920,6 +5941,7 @@
To be added.
To be added.
To be added.
+
@@ -5968,6 +5990,7 @@
To be added.
To be added.
To be added.
+
@@ -6016,6 +6039,7 @@
To be added.
To be added.
To be added.
+
@@ -6064,6 +6088,7 @@
To be added.
To be added.
To be added.
+
@@ -6093,6 +6118,7 @@
To be added.
To be added.
To be added.
+
@@ -6122,6 +6148,7 @@
To be added.
To be added.
To be added.
+
@@ -6760,6 +6787,7 @@
To be added.
To be added.
To be added.
+
@@ -6785,6 +6813,7 @@
To be added.
To be added.
To be added.
+
@@ -6810,6 +6839,7 @@
To be added.
To be added.
To be added.
+
@@ -6835,6 +6865,7 @@
To be added.
To be added.
To be added.
+
@@ -6894,6 +6925,7 @@
A vector whose elements are the maximum of the corresponding elements in and .
To be added.
The type of and () is not supported.
+
@@ -6951,6 +6983,7 @@
To be added.
To be added.
To be added.
+
@@ -7008,6 +7041,7 @@
To be added.
To be added.
To be added.
+
@@ -7065,6 +7099,7 @@
To be added.
To be added.
To be added.
+
@@ -7122,6 +7157,7 @@
To be added.
To be added.
To be added.
+
@@ -7181,6 +7217,7 @@
A vector whose elements are the minimum of the corresponding elements in and .
To be added.
The type of and () is not supported.
+
@@ -7238,6 +7275,7 @@
To be added.
To be added.
To be added.
+
@@ -7295,6 +7333,7 @@
To be added.
To be added.
To be added.
+
@@ -7352,6 +7391,7 @@
To be added.
To be added.
To be added.
+
@@ -7409,6 +7449,7 @@
To be added.
To be added.
To be added.
+
@@ -7615,6 +7656,7 @@
To be added.
To be added.
To be added.
+
@@ -7644,6 +7686,7 @@
To be added.
To be added.
To be added.
+
@@ -7989,6 +8032,7 @@
To be added.
To be added.
To be added.
+
@@ -8014,6 +8058,7 @@
To be added.
To be added.
To be added.
+
@@ -8039,6 +8084,7 @@
To be added.
To be added.
To be added.
+
@@ -8064,6 +8110,7 @@
To be added.
To be added.
To be added.
+
@@ -8091,6 +8138,7 @@
To be added.
To be added.
To be added.
+
@@ -8118,6 +8166,7 @@
To be added.
To be added.
To be added.
+
@@ -9213,6 +9262,7 @@
To be added.
To be added.
To be added.
+
@@ -9238,6 +9288,7 @@
To be added.
To be added.
To be added.
+
@@ -9269,6 +9320,7 @@
To be added.
To be added.
To be added.
+
@@ -9300,6 +9352,7 @@
To be added.
To be added.
To be added.
+
@@ -9811,6 +9864,7 @@
To be added.
To be added.
To be added.
+
@@ -9836,6 +9890,7 @@
To be added.
To be added.
To be added.
+
diff --git a/xml/System.Runtime.Intrinsics/Vector512`1.xml b/xml/System.Runtime.Intrinsics/Vector512`1.xml
index 53fa2d360c0..ba23d0415f4 100644
--- a/xml/System.Runtime.Intrinsics/Vector512`1.xml
+++ b/xml/System.Runtime.Intrinsics/Vector512`1.xml
@@ -224,9 +224,10 @@
System.Runtime.Intrinsics.Vector512<T>
- To be added.
+ Gets a new with the elements set to their index.
To be added.
To be added.
+ The type of the vector () is not supported.
diff --git a/xml/System.Runtime.Intrinsics/Vector64.xml b/xml/System.Runtime.Intrinsics/Vector64.xml
index d6c75376750..3f054cab584 100644
--- a/xml/System.Runtime.Intrinsics/Vector64.xml
+++ b/xml/System.Runtime.Intrinsics/Vector64.xml
@@ -1210,6 +1210,7 @@
Computes the ceiling of each element in a vector.
A vector whose elements are the ceiling of the elements in .
To be added.
+
@@ -1237,6 +1238,7 @@
Computes the ceiling of each element in a vector.
A vector whose elements are the ceiling of the elements in .
To be added.
+
@@ -1303,6 +1305,7 @@
To be added.
To be added.
To be added.
+
@@ -1369,6 +1372,7 @@
To be added.
To be added.
To be added.
+
@@ -1551,9 +1555,9 @@
- To be added.
- To be added.
- To be added.
+ The vector to convert.
+ Converts a to a using platform specific behavior on overflow.
+ The converted vector.
To be added.
@@ -1603,9 +1607,9 @@
- To be added.
- To be added.
- To be added.
+ The vector to convert.
+ Converts a to a using platform specific behavior on overflow.
+ The converted vector.
To be added.
@@ -1727,9 +1731,9 @@
- To be added.
- To be added.
- To be added.
+ The vector to convert.
+ Converts a to a using platform specific behavior on overflow.
+ The converted vector.
To be added.
@@ -1791,9 +1795,9 @@
- To be added.
- To be added.
- To be added.
+ The vector to convert.
+ Converts a to a using platform specific behavior on overflow.
+ The converted vector.
To be added.
@@ -1852,6 +1856,7 @@
To be added.
To be added.
To be added.
+
@@ -2074,9 +2079,9 @@
- To be added.
- To be added.
- To be added.
+ The vector that will have its Cos computed.
+ Computes the cos of each element in a vector.
+ A vector whose elements are the cos of the elements in .
To be added.
@@ -2099,9 +2104,9 @@
- To be added.
- To be added.
- To be added.
+ The vector that will have its Cos computed.
+ Computes the cos of each element in a vector.
+ A vector whose elements are the cos of the elements in .
To be added.
@@ -4014,11 +4019,11 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The type of the elements in the vector.
+ The value that element 0 will be initialized to.
+ The value that indicates how far apart each element should be from the previous.
+ Creates a new instance where the elements begin at a specified value and which are spaced apart according to another specified value.
+ A new instance with the first element initialized to and each subsequent element initialized to the value of the previous element plus .
To be added.
@@ -4041,9 +4046,9 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
- To be added.
- To be added.
- To be added.
+ The vector to convert to radians.
+ Converts a given vector from degrees to radians.
+ The vector of converted to radians.
To be added.
@@ -4066,9 +4071,9 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
- To be added.
- To be added.
- To be added.
+ The vector to convert to radians.
+ Converts a given vector from degrees to radians.
+ The vector of converted to radians.
To be added.
@@ -4482,9 +4487,9 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
- To be added.
- To be added.
- To be added.
+ The vector that will have its Exp computed.
+ Computes the exp of each element in a vector.
+ A vector whose elements are the exp of the elements in .
To be added.
@@ -4507,9 +4512,9 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
- To be added.
- To be added.
- To be added.
+ The vector that will have its exp computed.
+ Computes the exp of each element in a vector.
+ A vector whose elements are the exp of the elements in .
To be added.
@@ -4598,6 +4603,7 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
Computes the floor of each element in a vector.
A vector whose elements are the floor of the elements in .
To be added.
+
@@ -4625,6 +4631,7 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
Computes the floor of each element in a vector.
A vector whose elements are the floor of the elements in .
To be added.
+
@@ -4648,12 +4655,15 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to be multiplied with .
+ The vector to be multiplied with .
+ The vector to be added to the result of multiplied by .
+ Computes ( * ) + , rounded as one ternary operation.
+ ( * ) + , rounded as one ternary operation.
+
+ This computes ( * ) as if to infinite precision, adds to that result as if to infinite precision, and finally rounds to the nearest representable value.
+ This differs from the non-fused sequence which would compute ( * ) as if to infinite precision, round the result to the nearest representable value, add to the rounded result as if to infinite precision, and finally round to the nearest representable value.
+
@@ -4677,12 +4687,15 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to be multiplied with .
+ The vector to be multiplied with .
+ The vector to be added to the result of multiplied by .
+ Computes ( * ) + , rounded as one ternary operation.
+ ( * ) + , rounded as one ternary operation.
+
+ This computes ( * ) as if to infinite precision, adds to that result as if to infinite precision, and finally rounds to the nearest representable value.
+ This differs from the non-fused sequence which would compute ( * ) as if to infinite precision, round the result to the nearest representable value, add to the rounded result as if to infinite precision, and finally round to the nearest representable value.
+
@@ -5169,10 +5182,10 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to square and add to .
+ The vector to square and add to .
+ Computes the hypotenuse given two vectors representing the lengths of the shorter sides in a right-angled triangle.
+ The square root of -squared plus -squared.
To be added.
@@ -5196,10 +5209,10 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to square and add to .
+ The vector to square and add to .
+ Computes the hypotenuse given two vectors representing the lengths of the shorter sides in a right-angled triangle.
+ The square root of -squared plus -squared.
To be added.
@@ -5281,6 +5294,7 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
To be added.
To be added.
To be added.
+
@@ -5329,6 +5343,7 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
To be added.
To be added.
To be added.
+
@@ -5377,6 +5392,7 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
To be added.
To be added.
To be added.
+
@@ -5425,6 +5441,7 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
To be added.
To be added.
To be added.
+
@@ -5473,6 +5490,7 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
To be added.
To be added.
To be added.
+
@@ -5496,11 +5514,11 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The first vector.
+ The second vector.
+ A value between 0 and 1 that indicates the weight of .
+ Performs a linear interpolation between two vectors based on the given weighting.
+ The interpolated vector.
To be added.
@@ -5525,11 +5543,11 @@ On x86, this method corresponds to __m64 _mm_setr_pi8
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The first vector.
+ The second vector.
+ A value between 0 and 1 that indicates the weight of .
+ Performs a linear interpolation between two vectors based on the given weighting.
+ The interpolated vector.
To be added.
@@ -6258,9 +6276,9 @@ This method may bypass the cache on certain platforms.
- To be added.
- To be added.
- To be added.
+ The vector that will have its log computed.
+ Computes the log of each element in a vector.
+ A vector whose elements are the log of the elements in .
To be added.
@@ -6283,9 +6301,9 @@ This method may bypass the cache on certain platforms.
- To be added.
- To be added.
- To be added.
+ The vector that will have its log computed.
+ Computes the log of each element in a vector.
+ A vector whose elements are the log of the elements in .
To be added.
@@ -6308,9 +6326,9 @@ This method may bypass the cache on certain platforms.
- To be added.
- To be added.
- To be added.
+ The vector that will have its log2 computed.
+ Computes the log2 of each element in a vector.
+ A vector whose elements are the log2 of the elements in .
To be added.
@@ -6333,9 +6351,9 @@ This method may bypass the cache on certain platforms.
- To be added.
- To be added.
- To be added.
+ The vector that will have its log2 computed.
+ Computes the log2 of each element in a vector.
+ A vector whose elements are the log2 of the elements in .
To be added.
@@ -6402,6 +6420,7 @@ This method may bypass the cache on certain platforms.
A vector whose elements are the maximum of the corresponding elements in and .
To be added.
The type of and () is not supported.
+
@@ -6459,6 +6478,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -6516,6 +6536,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -6573,6 +6594,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -6630,6 +6652,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -6695,6 +6718,7 @@ This method may bypass the cache on certain platforms.
A vector whose elements are the minimum of the corresponding elements in and .
To be added.
The type of and () is not supported.
+
@@ -6752,6 +6776,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -6809,6 +6834,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -6866,6 +6892,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -6923,6 +6950,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -7139,12 +7167,15 @@ This method may bypass the cache on certain platforms.
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to be multiplied with .
+ The vector to be multiplied with .
+ The vector to be added to the result of multiplied by .
+ Computes an estimate of ( * ) + .
+ An estimate of ( * ) + .
+
+ On hardware that natively supports , this may return a result that was rounded as one ternary operation.
+ On hardware without specialized support, this may just return ( * ) + .
+
@@ -7168,12 +7199,15 @@ This method may bypass the cache on certain platforms.
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to be multiplied with .
+ The vector to be multiplied with .
+ The vector to be added to the result of multiplied by .
+ Computes an estimate of ( * ) + .
+ An estimate of ( * ) + .
+
+ On hardware that natively supports , this may return a result that was rounded as one ternary operation.
+ On hardware without specialized support, this may just return ( * ) + .
+
@@ -7534,9 +7568,9 @@ This method may bypass the cache on certain platforms.
- To be added.
- To be added.
- To be added.
+ The vector to convert to degrees.
+ Converts a given vector from radians to degrees.
+ The vector of converted to degrees.
To be added.
@@ -7559,9 +7593,9 @@ This method may bypass the cache on certain platforms.
- To be added.
- To be added.
- To be added.
+ The vector to convert to degrees.
+ Converts a given vector from radians to degrees.
+ The vector of converted to degrees.
To be added.
@@ -7588,6 +7622,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -7613,6 +7648,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -7635,10 +7671,10 @@ This method may bypass the cache on certain platforms.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to round.
+ The mode under which should be rounded.
+ Rounds each element in a vector to the nearest integer using the specified rounding mode.
+ The result of rounding each element in to the nearest integer using .
To be added.
@@ -7662,10 +7698,10 @@ This method may bypass the cache on certain platforms.
- To be added.
- To be added.
- To be added.
- To be added.
+ The vector to round.
+ The mode under which should be rounded.
+ Rounds each element in a vector to the nearest integer using the specified rounding mode.
+ The result of rounding each element in to the nearest integer using .
To be added.
@@ -8700,9 +8736,9 @@ This method may bypass the cache on certain platforms.
- To be added.
- To be added.
- To be added.
+ The vector that will have its Sin computed.
+ Computes the sin of each element in a vector.
+ A vector whose elements are the sin of the elements in .
To be added.
@@ -8725,9 +8761,9 @@ This method may bypass the cache on certain platforms.
- To be added.
- To be added.
- To be added.
+ The vector that will have its Sin computed.
+ Computes the sin of each element in a vector.
+ A vector whose elements are the sin of the elements in .
To be added.
@@ -8756,9 +8792,9 @@ This method may bypass the cache on certain platforms.
- To be added.
- To be added.
- To be added.
+ The vector that will have its SinCos computed.
+ Computes the sincos of each element in a vector.
+ A vector whose elements are the sincos of the elements in .
To be added.
@@ -8787,9 +8823,9 @@ This method may bypass the cache on certain platforms.
- To be added.
- To be added.
- To be added.
+ The vector that will have its SinCos computed.
+ Computes the sincos of each element in a vector.
+ A vector whose elements are the sincos of the elements in .
To be added.
@@ -9523,6 +9559,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
@@ -9548,6 +9585,7 @@ This method may bypass the cache on certain platforms.
To be added.
To be added.
To be added.
+
diff --git a/xml/System.Runtime.Intrinsics/Vector64`1.xml b/xml/System.Runtime.Intrinsics/Vector64`1.xml
index eb9fdb5bac7..475e332ac72 100644
--- a/xml/System.Runtime.Intrinsics/Vector64`1.xml
+++ b/xml/System.Runtime.Intrinsics/Vector64`1.xml
@@ -265,9 +265,10 @@
System.Runtime.Intrinsics.Vector64<T>
- To be added.
+ Gets a new with the elements set to their index.
To be added.
To be added.
+ The type of the vector () is not supported.